1 // RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
2 // RUN: -triple i686--windows -Oz -emit-llvm %s -o - \
3 // RUN: | FileCheck %s --check-prefixes CHECK,CHECK-I386,CHECK-INTEL
4 // RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
5 // RUN: -triple thumbv7--windows -Oz -emit-llvm %s -o - \
6 // RUN: | FileCheck %s --check-prefixes CHECK,CHECK-ARM,CHECK-ARM-ARM64,CHECK-ARM-X64
7 // RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
8 // RUN: -triple x86_64--windows -Oz -emit-llvm -target-feature +cx16 %s -o - \
9 // RUN: | FileCheck %s --check-prefixes CHECK,CHECK-X64,CHECK-ARM-X64,CHECK-INTEL
10 // RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
11 // RUN: -triple aarch64-windows -Oz -emit-llvm %s -o - \
12 // RUN: | FileCheck %s --check-prefixes CHECK-ARM-ARM64,CHECK-ARM-X64,CHECK-ARM64
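// The same source is built for i686, thumbv7, x86_64, and aarch64 Windows
// targets; the prefix lists above select which per-target check lines apply.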
14 // intrin.h needs size_t, but -ffreestanding prevents us from getting it from
15 // stddef.h. Work around it with this typedef.
16 typedef __SIZE_TYPE__ size_t;

#include <intrin.h>
20 #if defined(__i386__) || defined(__x86_64__)
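// MSVC string intrinsics: __stosb lowers to a volatile memset intrinsic,
// while the remaining __stos*/__movs* forms lower to inline "rep" string
// instruction sequences.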
21 void test__stosb(unsigned char *Dest, unsigned char Data, size_t Count) {
22 return __stosb(Dest, Data, Count);
25 // CHECK-I386: define{{.*}}void @test__stosb
26 // CHECK-I386: tail call void @llvm.memset.p0i8.i32(i8* align 1 %Dest, i8 %Data, i32 %Count, i1 true)
27 // CHECK-I386: ret void
30 // CHECK-X64: define{{.*}}void @test__stosb
31 // CHECK-X64: tail call void @llvm.memset.p0i8.i64(i8* align 1 %Dest, i8 %Data, i64 %Count, i1 true)
32 // CHECK-X64: ret void
35 void test__movsb(unsigned char *Dest, unsigned char *Src, size_t Count) {
36 return __movsb(Dest, Src, Count);
38 // CHECK-I386-LABEL: define{{.*}} void @test__movsb
39 // CHECK-I386: call { i8*, i8*, i32 } asm sideeffect "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i8* %Dest, i8* %Src, i32 %Count)
40 // CHECK-I386: ret void
43 // CHECK-X64-LABEL: define{{.*}} void @test__movsb
44 // CHECK-X64: call { i8*, i8*, i64 } asm sideeffect "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i8* %Dest, i8* %Src, i64 %Count)
45 // CHECK-X64: ret void
48 void test__stosw(unsigned short *Dest, unsigned short Data, size_t Count) {
49 return __stosw(Dest, Data, Count);
51 // CHECK-I386-LABEL: define{{.*}} void @test__stosw
52 // CHECK-I386: call { i16*, i32 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, i16* %Dest, i32 %Count)
53 // CHECK-I386: ret void
56 // CHECK-X64-LABEL: define{{.*}} void @test__stosw
57 // CHECK-X64: call { i16*, i64 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, i16* %Dest, i64 %Count)
58 // CHECK-X64: ret void
61 void test__movsw(unsigned short *Dest, unsigned short *Src, size_t Count) {
62 return __movsw(Dest, Src, Count);
64 // CHECK-I386-LABEL: define{{.*}} void @test__movsw
65 // CHECK-I386: call { i16*, i16*, i32 } asm sideeffect "rep movsw", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i16* %Dest, i16* %Src, i32 %Count)
66 // CHECK-I386: ret void
69 // CHECK-X64-LABEL: define{{.*}} void @test__movsw
70 // CHECK-X64: call { i16*, i16*, i64 } asm sideeffect "rep movsw", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i16* %Dest, i16* %Src, i64 %Count)
71 // CHECK-X64: ret void
74 void test__stosd(unsigned long *Dest, unsigned long Data, size_t Count) {
75 return __stosd(Dest, Data, Count);
77 // CHECK-I386-LABEL: define{{.*}} void @test__stosd
78 // CHECK-I386: call { i32*, i32 } asm sideeffect "rep stosl", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, i32* %Dest, i32 %Count)
79 // CHECK-I386: ret void
82 // CHECK-X64-LABEL: define{{.*}} void @test__stosd
83 // CHECK-X64: call { i32*, i64 } asm sideeffect "rep stosl", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, i32* %Dest, i64 %Count)
84 // CHECK-X64: ret void
87 void test__movsd(unsigned long *Dest, unsigned long *Src, size_t Count) {
88 return __movsd(Dest, Src, Count);
90 // CHECK-I386-LABEL: define{{.*}} void @test__movsd
91 // CHECK-I386: call { i32*, i32*, i32 } asm sideeffect "rep movsl", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Dest, i32* %Src, i32 %Count)
92 // CHECK-I386: ret void
95 // CHECK-X64-LABEL: define{{.*}} void @test__movsd
96 // CHECK-X64: call { i32*, i32*, i64 } asm sideeffect "rep movsl", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Dest, i32* %Src, i64 %Count)
97 // CHECK-X64: ret void
101 void test__stosq(unsigned __int64 *Dest, unsigned __int64 Data, size_t Count) {
102 return __stosq(Dest, Data, Count);
104 // CHECK-X64-LABEL: define{{.*}} void @test__stosq
105 // CHECK-X64: call { i64*, i64 } asm sideeffect "rep stosq", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i64 %Data, i64* %Dest, i64 %Count)
106 // CHECK-X64: ret void
109 void test__movsq(unsigned __int64 *Dest, unsigned __int64 *Src, size_t Count) {
110 return __movsq(Dest, Src, Count);
112 // CHECK-X64-LABEL: define{{.*}} void @test__movsq
113 // CHECK-X64: call { i64*, i64*, i64 } asm sideeffect "rep movsq", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Dest, i64* %Src, i64 %Count)
114 // CHECK-X64: ret void
118 void test__ud2(void) {
121 // CHECK-INTEL-LABEL: define{{.*}} void @test__ud2()
122 // CHECK-INTEL: call void @llvm.trap()
124 void test__int2c(void) {
127 // CHECK-INTEL-LABEL: define{{.*}} void @test__int2c()
128 // CHECK-INTEL: call void asm sideeffect "int $$0x2c", ""() #[[NORETURN:[0-9]+]]
133 void *test_ReturnAddress() {
134 return _ReturnAddress();
136 // CHECK-LABEL: define{{.*}}i8* @test_ReturnAddress()
137 // CHECK: = tail call i8* @llvm.returnaddress(i32 0)
140 #if defined(__i386__) || defined(__x86_64__) || defined (__aarch64__)
141 void *test_AddressOfReturnAddress() {
142 return _AddressOfReturnAddress();
144 // CHECK-INTEL-LABEL: define dso_local i8* @test_AddressOfReturnAddress()
145 // CHECK-INTEL: = tail call i8* @llvm.addressofreturnaddress.p0i8()
146 // CHECK-INTEL: ret i8*
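// _BitScanForward/_BitScanReverse return 0 when the mask is zero; otherwise
// they store the index of the lowest/highest set bit (computed via cttz/ctlz)
// and return 1.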
149 unsigned char test_BitScanForward(unsigned long *Index, unsigned long Mask) {
150 return _BitScanForward(++Index, Mask);
152 // CHECK: define{{.*}}i8 @test_BitScanForward(i32* {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
153 // CHECK: [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i32 %Mask, 0
154 // CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
155 // CHECK: [[END_LABEL]]:
156 // CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
157 // CHECK: ret i8 [[RESULT]]
158 // CHECK: [[ISNOTZERO_LABEL]]:
159 // CHECK: [[IDXGEP:%[a-z0-9._]+]] = getelementptr inbounds i32, i32* %Index, {{i64|i32}} 1
160 // CHECK: [[INDEX:%[0-9]+]] = tail call i32 @llvm.cttz.i32(i32 %Mask, i1 true)
161 // CHECK: store i32 [[INDEX]], i32* [[IDXGEP]], align 4
162 // CHECK: br label %[[END_LABEL]]
164 unsigned char test_BitScanReverse(unsigned long *Index, unsigned long Mask) {
165 return _BitScanReverse(++Index, Mask);
167 // CHECK: define{{.*}}i8 @test_BitScanReverse(i32* {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
168 // CHECK: [[ISNOTZERO:%[0-9]+]] = icmp eq i32 %Mask, 0
169 // CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
170 // CHECK: [[END_LABEL]]:
171 // CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
172 // CHECK: ret i8 [[RESULT]]
173 // CHECK: [[ISNOTZERO_LABEL]]:
174 // CHECK: [[IDXGEP:%[a-z0-9._]+]] = getelementptr inbounds i32, i32* %Index, {{i64|i32}} 1
175 // CHECK: [[REVINDEX:%[0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %Mask, i1 true)
176 // CHECK: [[INDEX:%[0-9]+]] = xor i32 [[REVINDEX]], 31
177 // CHECK: store i32 [[INDEX]], i32* [[IDXGEP]], align 4
178 // CHECK: br label %[[END_LABEL]]
180 #if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
181 unsigned char test_BitScanForward64(unsigned long *Index, unsigned __int64 Mask) {
182 return _BitScanForward64(Index, Mask);
184 // CHECK-ARM-X64: define{{.*}}i8 @test_BitScanForward64(i32* {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
185 // CHECK-ARM-X64: [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i64 %Mask, 0
186 // CHECK-ARM-X64: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
187 // CHECK-ARM-X64: [[END_LABEL]]:
188 // CHECK-ARM-X64: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
189 // CHECK-ARM-X64: ret i8 [[RESULT]]
190 // CHECK-ARM-X64: [[ISNOTZERO_LABEL]]:
191 // CHECK-ARM-X64: [[INDEX:%[0-9]+]] = tail call i64 @llvm.cttz.i64(i64 %Mask, i1 true)
192 // CHECK-ARM-X64: [[TRUNC_INDEX:%[0-9]+]] = trunc i64 [[INDEX]] to i32
193 // CHECK-ARM-X64: store i32 [[TRUNC_INDEX]], i32* %Index, align 4
194 // CHECK-ARM-X64: br label %[[END_LABEL]]
196 unsigned char test_BitScanReverse64(unsigned long *Index, unsigned __int64 Mask) {
197 return _BitScanReverse64(Index, Mask);
199 // CHECK-ARM-X64: define{{.*}}i8 @test_BitScanReverse64(i32* {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
200 // CHECK-ARM-X64: [[ISNOTZERO:%[0-9]+]] = icmp eq i64 %Mask, 0
201 // CHECK-ARM-X64: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
202 // CHECK-ARM-X64: [[END_LABEL]]:
203 // CHECK-ARM-X64: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
204 // CHECK-ARM-X64: ret i8 [[RESULT]]
205 // CHECK-ARM-X64: [[ISNOTZERO_LABEL]]:
206 // CHECK-ARM-X64: [[REVINDEX:%[0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %Mask, i1 true)
207 // CHECK-ARM-X64: [[TRUNC_REVINDEX:%[0-9]+]] = trunc i64 [[REVINDEX]] to i32
208 // CHECK-ARM-X64: [[INDEX:%[0-9]+]] = xor i32 [[TRUNC_REVINDEX]], 63
209 // CHECK-ARM-X64: store i32 [[INDEX]], i32* %Index, align 4
210 // CHECK-ARM-X64: br label %[[END_LABEL]]
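// The pointer-flavored exchanges are checked through the [[iPTR]] FileCheck
// variable so the same lines match both 32-bit and 64-bit pointer widths.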
213 void *test_InterlockedExchangePointer(void * volatile *Target, void *Value) {
214 return _InterlockedExchangePointer(Target, Value);
217 // CHECK: define{{.*}}i8* @test_InterlockedExchangePointer(i8** {{[a-z_ ]*}}%Target, i8* {{[a-z_ ]*}}%Value){{.*}}{
218 // CHECK: %[[TARGET:[0-9]+]] = bitcast i8** %Target to [[iPTR:i[0-9]+]]*
219 // CHECK: %[[VALUE:[0-9]+]] = ptrtoint i8* %Value to [[iPTR]]
220 // CHECK: %[[EXCHANGE:[0-9]+]] = atomicrmw xchg [[iPTR]]* %[[TARGET]], [[iPTR]] %[[VALUE]] seq_cst
221 // CHECK: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXCHANGE]] to i8*
222 // CHECK: ret i8* %[[RESULT]]
225 void *test_InterlockedCompareExchangePointer(void * volatile *Destination,
226 void *Exchange, void *Comparand) {
227 return _InterlockedCompareExchangePointer(Destination, Exchange, Comparand);
230 // CHECK: define{{.*}}i8* @test_InterlockedCompareExchangePointer(i8** {{[a-z_ ]*}}%Destination, i8* {{[a-z_ ]*}}%Exchange, i8* {{[a-z_ ]*}}%Comparand){{.*}}{
231 // CHECK: %[[DEST:[0-9]+]] = bitcast i8** %Destination to [[iPTR]]*
232 // CHECK: %[[EXCHANGE:[0-9]+]] = ptrtoint i8* %Exchange to [[iPTR]]
233 // CHECK: %[[COMPARAND:[0-9]+]] = ptrtoint i8* %Comparand to [[iPTR]]
234 // CHECK: %[[XCHG:[0-9]+]] = cmpxchg volatile [[iPTR]]* %[[DEST:[0-9]+]], [[iPTR]] %[[COMPARAND:[0-9]+]], [[iPTR]] %[[EXCHANGE:[0-9]+]] seq_cst seq_cst
235 // CHECK: %[[EXTRACT:[0-9]+]] = extractvalue { [[iPTR]], i1 } %[[XCHG]], 0
236 // CHECK: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to i8*
237 // CHECK: ret i8* %[[RESULT:[0-9]+]]
240 void *test_InterlockedCompareExchangePointer_nf(void * volatile *Destination,
241 void *Exchange, void *Comparand) {
242 return _InterlockedCompareExchangePointer_nf(Destination, Exchange, Comparand);
245 // CHECK: define{{.*}}i8* @test_InterlockedCompareExchangePointer_nf(i8** {{[a-z_ ]*}}%Destination, i8* {{[a-z_ ]*}}%Exchange, i8* {{[a-z_ ]*}}%Comparand){{.*}}{
246 // CHECK: %[[DEST:[0-9]+]] = bitcast i8** %Destination to [[iPTR]]*
247 // CHECK: %[[EXCHANGE:[0-9]+]] = ptrtoint i8* %Exchange to [[iPTR]]
248 // CHECK: %[[COMPARAND:[0-9]+]] = ptrtoint i8* %Comparand to [[iPTR]]
249 // CHECK: %[[XCHG:[0-9]+]] = cmpxchg volatile [[iPTR]]* %[[DEST:[0-9]+]], [[iPTR]] %[[COMPARAND:[0-9]+]], [[iPTR]] %[[EXCHANGE:[0-9]+]] monotonic monotonic
250 // CHECK: %[[EXTRACT:[0-9]+]] = extractvalue { [[iPTR]], i1 } %[[XCHG]], 0
251 // CHECK: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to i8*
252 // CHECK: ret i8* %[[RESULT:[0-9]+]]
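// Plain (sequentially consistent) Interlocked exchange, add, sub, or, xor,
// and and operations map directly onto seq_cst atomicrmw instructions.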
255 char test_InterlockedExchange8(char volatile *value, char mask) {
256 return _InterlockedExchange8(value, mask);
258 // CHECK: define{{.*}}i8 @test_InterlockedExchange8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
259 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask seq_cst
260 // CHECK: ret i8 [[RESULT:%[0-9]+]]
263 short test_InterlockedExchange16(short volatile *value, short mask) {
264 return _InterlockedExchange16(value, mask);
266 // CHECK: define{{.*}}i16 @test_InterlockedExchange16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
267 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask seq_cst
268 // CHECK: ret i16 [[RESULT:%[0-9]+]]
271 long test_InterlockedExchange(long volatile *value, long mask) {
272 return _InterlockedExchange(value, mask);
274 // CHECK: define{{.*}}i32 @test_InterlockedExchange(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
275 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask seq_cst
276 // CHECK: ret i32 [[RESULT:%[0-9]+]]
279 char test_InterlockedExchangeAdd8(char volatile *value, char mask) {
280 return _InterlockedExchangeAdd8(value, mask);
282 // CHECK: define{{.*}}i8 @test_InterlockedExchangeAdd8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
283 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask seq_cst
284 // CHECK: ret i8 [[RESULT:%[0-9]+]]
287 short test_InterlockedExchangeAdd16(short volatile *value, short mask) {
288 return _InterlockedExchangeAdd16(value, mask);
290 // CHECK: define{{.*}}i16 @test_InterlockedExchangeAdd16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
291 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask seq_cst
292 // CHECK: ret i16 [[RESULT:%[0-9]+]]
295 long test_InterlockedExchangeAdd(long volatile *value, long mask) {
296 return _InterlockedExchangeAdd(value, mask);
298 // CHECK: define{{.*}}i32 @test_InterlockedExchangeAdd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
299 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask seq_cst
300 // CHECK: ret i32 [[RESULT:%[0-9]+]]
303 char test_InterlockedExchangeSub8(char volatile *value, char mask) {
304 return _InterlockedExchangeSub8(value, mask);
306 // CHECK: define{{.*}}i8 @test_InterlockedExchangeSub8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
307 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub i8* %value, i8 %mask seq_cst
308 // CHECK: ret i8 [[RESULT:%[0-9]+]]
311 short test_InterlockedExchangeSub16(short volatile *value, short mask) {
312 return _InterlockedExchangeSub16(value, mask);
314 // CHECK: define{{.*}}i16 @test_InterlockedExchangeSub16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
315 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub i16* %value, i16 %mask seq_cst
316 // CHECK: ret i16 [[RESULT:%[0-9]+]]
319 long test_InterlockedExchangeSub(long volatile *value, long mask) {
320 return _InterlockedExchangeSub(value, mask);
322 // CHECK: define{{.*}}i32 @test_InterlockedExchangeSub(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
323 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub i32* %value, i32 %mask seq_cst
324 // CHECK: ret i32 [[RESULT:%[0-9]+]]
327 char test_InterlockedOr8(char volatile *value, char mask) {
328 return _InterlockedOr8(value, mask);
330 // CHECK: define{{.*}}i8 @test_InterlockedOr8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
331 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask seq_cst
332 // CHECK: ret i8 [[RESULT:%[0-9]+]]
335 short test_InterlockedOr16(short volatile *value, short mask) {
336 return _InterlockedOr16(value, mask);
338 // CHECK: define{{.*}}i16 @test_InterlockedOr16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
339 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask seq_cst
340 // CHECK: ret i16 [[RESULT:%[0-9]+]]
343 long test_InterlockedOr(long volatile *value, long mask) {
344 return _InterlockedOr(value, mask);
346 // CHECK: define{{.*}}i32 @test_InterlockedOr(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
347 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask seq_cst
348 // CHECK: ret i32 [[RESULT:%[0-9]+]]
351 char test_InterlockedXor8(char volatile *value, char mask) {
352 return _InterlockedXor8(value, mask);
354 // CHECK: define{{.*}}i8 @test_InterlockedXor8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
355 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask seq_cst
356 // CHECK: ret i8 [[RESULT:%[0-9]+]]
359 short test_InterlockedXor16(short volatile *value, short mask) {
360 return _InterlockedXor16(value, mask);
362 // CHECK: define{{.*}}i16 @test_InterlockedXor16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
363 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask seq_cst
364 // CHECK: ret i16 [[RESULT:%[0-9]+]]
367 long test_InterlockedXor(long volatile *value, long mask) {
368 return _InterlockedXor(value, mask);
370 // CHECK: define{{.*}}i32 @test_InterlockedXor(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
371 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask seq_cst
372 // CHECK: ret i32 [[RESULT:%[0-9]+]]
375 char test_InterlockedAnd8(char volatile *value, char mask) {
376 return _InterlockedAnd8(value, mask);
378 // CHECK: define{{.*}}i8 @test_InterlockedAnd8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
379 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask seq_cst
380 // CHECK: ret i8 [[RESULT:%[0-9]+]]
383 short test_InterlockedAnd16(short volatile *value, short mask) {
384 return _InterlockedAnd16(value, mask);
386 // CHECK: define{{.*}}i16 @test_InterlockedAnd16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
387 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask seq_cst
388 // CHECK: ret i16 [[RESULT:%[0-9]+]]
391 long test_InterlockedAnd(long volatile *value, long mask) {
392 return _InterlockedAnd(value, mask);
394 // CHECK: define{{.*}}i32 @test_InterlockedAnd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
395 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask seq_cst
396 // CHECK: ret i32 [[RESULT:%[0-9]+]]
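// The compare-exchange intrinsics lower to volatile seq_cst cmpxchg; the
// returned value is element 0 of the { value, success } result pair.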
399 char test_InterlockedCompareExchange8(char volatile *Destination, char Exchange, char Comperand) {
400 return _InterlockedCompareExchange8(Destination, Exchange, Comperand);
402 // CHECK: define{{.*}}i8 @test_InterlockedCompareExchange8(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
403 // CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange seq_cst seq_cst
404 // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
405 // CHECK: ret i8 [[RESULT]]
408 short test_InterlockedCompareExchange16(short volatile *Destination, short Exchange, short Comperand) {
409 return _InterlockedCompareExchange16(Destination, Exchange, Comperand);
411 // CHECK: define{{.*}}i16 @test_InterlockedCompareExchange16(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
412 // CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange seq_cst seq_cst
413 // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
414 // CHECK: ret i16 [[RESULT]]
417 long test_InterlockedCompareExchange(long volatile *Destination, long Exchange, long Comperand) {
418 return _InterlockedCompareExchange(Destination, Exchange, Comperand);
420 // CHECK: define{{.*}}i32 @test_InterlockedCompareExchange(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
421 // CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange seq_cst seq_cst
422 // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
423 // CHECK: ret i32 [[RESULT]]
426 __int64 test_InterlockedCompareExchange64(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
427 return _InterlockedCompareExchange64(Destination, Exchange, Comperand);
429 // CHECK: define{{.*}}i64 @test_InterlockedCompareExchange64(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
430 // CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange seq_cst seq_cst
431 // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
432 // CHECK: ret i64 [[RESULT]]
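// 128-bit compare-exchange is only available on x86_64 and relies on the
// cx16 target feature enabled in the x86_64 RUN line above.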
435 #if defined(__x86_64__)
436 unsigned char test_InterlockedCompareExchange128(
437 __int64 volatile *Destination, __int64 ExchangeHigh,
438 __int64 ExchangeLow, __int64 *ComparandResult) {
439 return _InterlockedCompareExchange128(++Destination, ++ExchangeHigh,
440 ++ExchangeLow, ++ComparandResult);
442 // CHECK-X64: define{{.*}}i8 @test_InterlockedCompareExchange128(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%ExchangeHigh, i64{{[a-z_ ]*}}%ExchangeLow, i64*{{[a-z_ ]*}}%ComparandResult){{.*}}{
443 // CHECK-X64: %incdec.ptr = getelementptr inbounds i64, i64* %Destination, i64 1
444 // CHECK-X64: %inc = add nsw i64 %ExchangeHigh, 1
445 // CHECK-X64: %inc1 = add nsw i64 %ExchangeLow, 1
446 // CHECK-X64: %incdec.ptr2 = getelementptr inbounds i64, i64* %ComparandResult, i64 1
447 // CHECK-X64: [[DST:%[0-9]+]] = bitcast i64* %incdec.ptr to i128*
448 // CHECK-X64: [[EH:%[0-9]+]] = zext i64 %inc to i128
449 // CHECK-X64: [[EL:%[0-9]+]] = zext i64 %inc1 to i128
450 // CHECK-X64: [[CNR:%[0-9]+]] = bitcast i64* %incdec.ptr2 to i128*
451 // CHECK-X64: [[EHS:%[0-9]+]] = shl nuw i128 [[EH]], 64
452 // CHECK-X64: [[EXP:%[0-9]+]] = or i128 [[EHS]], [[EL]]
453 // CHECK-X64: [[ORG:%[0-9]+]] = load i128, i128* [[CNR]], align 16
454 // CHECK-X64: [[RES:%[0-9]+]] = cmpxchg volatile i128* [[DST]], i128 [[ORG]], i128 [[EXP]] seq_cst seq_cst
455 // CHECK-X64: [[OLD:%[0-9]+]] = extractvalue { i128, i1 } [[RES]], 0
456 // CHECK-X64: store i128 [[OLD]], i128* [[CNR]], align 16
457 // CHECK-X64: [[SUC1:%[0-9]+]] = extractvalue { i128, i1 } [[RES]], 1
458 // CHECK-X64: [[SUC8:%[0-9]+]] = zext i1 [[SUC1]] to i8
459 // CHECK-X64: ret i8 [[SUC8]]
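// _InterlockedIncrement/_InterlockedDecrement return the updated value, so
// the checks expect an add of +1/-1 applied to the atomicrmw result.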
463 short test_InterlockedIncrement16(short volatile *Addend) {
464 return _InterlockedIncrement16(++Addend);
466 // CHECK: define{{.*}}i16 @test_InterlockedIncrement16(i16*{{[a-z_ ]*}}%Addend){{.*}}{
467 // CHECK: %incdec.ptr = getelementptr inbounds i16, i16* %Addend, {{i64|i32}} 1
468 // CHECK: [[TMP:%[0-9]+]] = atomicrmw add i16* %incdec.ptr, i16 1 seq_cst
469 // CHECK: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
470 // CHECK: ret i16 [[RESULT]]
473 long test_InterlockedIncrement(long volatile *Addend) {
474 return _InterlockedIncrement(++Addend);
476 // CHECK: define{{.*}}i32 @test_InterlockedIncrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{
477 // CHECK: %incdec.ptr = getelementptr inbounds i32, i32* %Addend, {{i64|i32}} 1
478 // CHECK: [[TMP:%[0-9]+]] = atomicrmw add i32* %incdec.ptr, i32 1 seq_cst
479 // CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
480 // CHECK: ret i32 [[RESULT]]
483 short test_InterlockedDecrement16(short volatile *Addend) {
484 return _InterlockedDecrement16(Addend);
486 // CHECK: define{{.*}}i16 @test_InterlockedDecrement16(i16*{{[a-z_ ]*}}%Addend){{.*}}{
487 // CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 seq_cst
488 // CHECK: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
489 // CHECK: ret i16 [[RESULT]]
492 long test_InterlockedDecrement(long volatile *Addend) {
493 return _InterlockedDecrement(Addend);
495 // CHECK: define{{.*}}i32 @test_InterlockedDecrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{
496 // CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 seq_cst
497 // CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
498 // CHECK: ret i32 [[RESULT]]
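// The __iso_volatile_* accessors map directly to volatile loads and stores
// with no additional fences.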
501 char test_iso_volatile_load8(char volatile *p) { return __iso_volatile_load8(p); }
502 short test_iso_volatile_load16(short volatile *p) { return __iso_volatile_load16(p); }
503 int test_iso_volatile_load32(int volatile *p) { return __iso_volatile_load32(p); }
504 __int64 test_iso_volatile_load64(__int64 volatile *p) { return __iso_volatile_load64(p); }
506 // CHECK: define{{.*}}i8 @test_iso_volatile_load8(i8*{{[a-z_ ]*}}%p)
507 // CHECK: = load volatile i8, i8* %p
508 // CHECK: define{{.*}}i16 @test_iso_volatile_load16(i16*{{[a-z_ ]*}}%p)
509 // CHECK: = load volatile i16, i16* %p
510 // CHECK: define{{.*}}i32 @test_iso_volatile_load32(i32*{{[a-z_ ]*}}%p)
511 // CHECK: = load volatile i32, i32* %p
512 // CHECK: define{{.*}}i64 @test_iso_volatile_load64(i64*{{[a-z_ ]*}}%p)
513 // CHECK: = load volatile i64, i64* %p
515 void test_iso_volatile_store8(char volatile *p, char v) { __iso_volatile_store8(p, v); }
516 void test_iso_volatile_store16(short volatile *p, short v) { __iso_volatile_store16(p, v); }
517 void test_iso_volatile_store32(int volatile *p, int v) { __iso_volatile_store32(p, v); }
518 void test_iso_volatile_store64(__int64 volatile *p, __int64 v) { __iso_volatile_store64(p, v); }
520 // CHECK: define{{.*}}void @test_iso_volatile_store8(i8*{{[a-z_ ]*}}%p, i8 {{[a-z_ ]*}}%v)
521 // CHECK: store volatile i8 %v, i8* %p
522 // CHECK: define{{.*}}void @test_iso_volatile_store16(i16*{{[a-z_ ]*}}%p, i16 {{[a-z_ ]*}}%v)
523 // CHECK: store volatile i16 %v, i16* %p
524 // CHECK: define{{.*}}void @test_iso_volatile_store32(i32*{{[a-z_ ]*}}%p, i32 {{[a-z_ ]*}}%v)
525 // CHECK: store volatile i32 %v, i32* %p
526 // CHECK: define{{.*}}void @test_iso_volatile_store64(i64*{{[a-z_ ]*}}%p, i64 {{[a-z_ ]*}}%v)
527 // CHECK: store volatile i64 %v, i64* %p
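// The 64-bit Interlocked operations are available on all four targets tested
// here and use the same seq_cst atomicrmw lowering as the narrower widths.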
530 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
531 __int64 test_InterlockedExchange64(__int64 volatile *value, __int64 mask) {
532 return _InterlockedExchange64(value, mask);
534 // CHECK: define{{.*}}i64 @test_InterlockedExchange64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
535 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask seq_cst
536 // CHECK: ret i64 [[RESULT:%[0-9]+]]
539 __int64 test_InterlockedExchangeAdd64(__int64 volatile *value, __int64 mask) {
540 return _InterlockedExchangeAdd64(value, mask);
542 // CHECK: define{{.*}}i64 @test_InterlockedExchangeAdd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
543 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask seq_cst
544 // CHECK: ret i64 [[RESULT:%[0-9]+]]
547 __int64 test_InterlockedExchangeSub64(__int64 volatile *value, __int64 mask) {
548 return _InterlockedExchangeSub64(value, mask);
550 // CHECK: define{{.*}}i64 @test_InterlockedExchangeSub64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
551 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub i64* %value, i64 %mask seq_cst
552 // CHECK: ret i64 [[RESULT:%[0-9]+]]
555 __int64 test_InterlockedOr64(__int64 volatile *value, __int64 mask) {
556 return _InterlockedOr64(value, mask);
558 // CHECK: define{{.*}}i64 @test_InterlockedOr64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
559 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask seq_cst
560 // CHECK: ret i64 [[RESULT:%[0-9]+]]
563 __int64 test_InterlockedXor64(__int64 volatile *value, __int64 mask) {
564 return _InterlockedXor64(value, mask);
566 // CHECK: define{{.*}}i64 @test_InterlockedXor64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
567 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask seq_cst
568 // CHECK: ret i64 [[RESULT:%[0-9]+]]
571 __int64 test_InterlockedAnd64(__int64 volatile *value, __int64 mask) {
572 return _InterlockedAnd64(value, mask);
574 // CHECK: define{{.*}}i64 @test_InterlockedAnd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
575 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask seq_cst
576 // CHECK: ret i64 [[RESULT:%[0-9]+]]
579 __int64 test_InterlockedIncrement64(__int64 volatile *Addend) {
580 return _InterlockedIncrement64(Addend);
582 // CHECK: define{{.*}}i64 @test_InterlockedIncrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
583 // CHECK: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 seq_cst
584 // CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
585 // CHECK: ret i64 [[RESULT]]
588 __int64 test_InterlockedDecrement64(__int64 volatile *Addend) {
589 return _InterlockedDecrement64(Addend);
591 // CHECK: define{{.*}}i64 @test_InterlockedDecrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
592 // CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 seq_cst
593 // CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
594 // CHECK: ret i64 [[RESULT]]
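// The HLE variants emit inline asm so that the XACQUIRE (0xf2) / XRELEASE
// (0xf3) prefixes are placed in front of the locked instruction.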
599 #if defined(__i386__) || defined(__x86_64__)
600 long test_InterlockedExchange_HLEAcquire(long volatile *Target, long Value) {
601 // CHECK-INTEL: define{{.*}} i32 @test_InterlockedExchange_HLEAcquire(i32*{{[a-z_ ]*}}%Target, i32{{[a-z_ ]*}}%Value)
602 // CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; xchg $0, $1", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Target, i32 %Value, i32* %Target)
603 return _InterlockedExchange_HLEAcquire(Target, Value);
605 long test_InterlockedExchange_HLERelease(long volatile *Target, long Value) {
606 // CHECK-INTEL: define{{.*}} i32 @test_InterlockedExchange_HLERelease(i32*{{[a-z_ ]*}}%Target, i32{{[a-z_ ]*}}%Value)
607 // CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; xchg $0, $1", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Target, i32 %Value, i32* %Target)
608 return _InterlockedExchange_HLERelease(Target, Value);
610 long test_InterlockedCompareExchange_HLEAcquire(long volatile *Destination,
611 long Exchange, long Comparand) {
612 // CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLEAcquire(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand)
613 // CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Destination, i32 %Exchange, i32 %Comparand, i32* %Destination)
614 return _InterlockedCompareExchange_HLEAcquire(Destination, Exchange, Comparand);
616 long test_InterlockedCompareExchange_HLERelease(long volatile *Destination,
617 long Exchange, long Comparand) {
618 // CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLERelease(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand)
619 // CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Destination, i32 %Exchange, i32 %Comparand, i32* %Destination)
620 return _InterlockedCompareExchange_HLERelease(Destination, Exchange, Comparand);
623 #if defined(__x86_64__)
624 __int64 test_InterlockedExchange64_HLEAcquire(__int64 volatile *Target, __int64 Value) {
625 // CHECK-X64: define{{.*}} i64 @test_InterlockedExchange64_HLEAcquire(i64*{{[a-z_ ]*}}%Target, i64{{[a-z_ ]*}}%Value)
626 // CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; xchg $0, $1", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Target, i64 %Value, i64* %Target)
627 return _InterlockedExchange64_HLEAcquire(Target, Value);
629 __int64 test_InterlockedExchange64_HLERelease(__int64 volatile *Target, __int64 Value) {
630 // CHECK-X64: define{{.*}} i64 @test_InterlockedExchange64_HLERelease(i64*{{[a-z_ ]*}}%Target, i64{{[a-z_ ]*}}%Value)
631 // CHECK-X64: call i64 asm sideeffect ".byte 0xf3 ; lock ; xchg $0, $1", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Target, i64 %Value, i64* %Target)
632 return _InterlockedExchange64_HLERelease(Target, Value);
634 __int64 test_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *Destination,
635 __int64 Exchange, __int64 Comparand) {
636 // CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLEAcquire(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand)
637 // CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Destination, i64 %Exchange, i64 %Comparand, i64* %Destination)
638 return _InterlockedCompareExchange64_HLEAcquire(Destination, Exchange, Comparand);
640 __int64 test_InterlockedCompareExchange64_HLERelease(__int64 volatile *Destination,
641 __int64 Exchange, __int64 Comparand) {
642 // CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLERelease(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand)
643 // CHECK-X64: call i64 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Destination, i64 %Exchange, i64 %Comparand, i64* %Destination)
644 return _InterlockedCompareExchange64_HLERelease(Destination, Exchange, Comparand);
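// ARM and ARM64 additionally expose _acq/_rel/_nf variants, which lower to
// acquire, release, and monotonic (relaxed) atomic orderings respectively.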
648 #if defined(__arm__) || defined(__aarch64__)
649 char test_InterlockedExchangeAdd8_acq(char volatile *value, char mask) {
650 return _InterlockedExchangeAdd8_acq(value, mask);
652 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
653 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask acquire
654 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
655 // CHECK-ARM-ARM64: }
656 char test_InterlockedExchangeAdd8_rel(char volatile *value, char mask) {
657 return _InterlockedExchangeAdd8_rel(value, mask);
659 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
660 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask release
661 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
662 // CHECK-ARM-ARM64: }
663 char test_InterlockedExchangeAdd8_nf(char volatile *value, char mask) {
664 return _InterlockedExchangeAdd8_nf(value, mask);
666 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
667 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask monotonic
668 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
669 // CHECK-ARM-ARM64: }
670 short test_InterlockedExchangeAdd16_acq(short volatile *value, short mask) {
671 return _InterlockedExchangeAdd16_acq(value, mask);
673 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
674 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask acquire
675 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
676 // CHECK-ARM-ARM64: }
677 short test_InterlockedExchangeAdd16_rel(short volatile *value, short mask) {
678 return _InterlockedExchangeAdd16_rel(value, mask);
680 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
681 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask release
682 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
683 // CHECK-ARM-ARM64: }
684 short test_InterlockedExchangeAdd16_nf(short volatile *value, short mask) {
685 return _InterlockedExchangeAdd16_nf(value, mask);
687 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
688 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask monotonic
689 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
690 // CHECK-ARM-ARM64: }
691 long test_InterlockedExchangeAdd_acq(long volatile *value, long mask) {
692 return _InterlockedExchangeAdd_acq(value, mask);
694 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
695 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask acquire
696 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
697 // CHECK-ARM-ARM64: }
698 long test_InterlockedExchangeAdd_rel(long volatile *value, long mask) {
699 return _InterlockedExchangeAdd_rel(value, mask);
701 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
702 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask release
703 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
704 // CHECK-ARM-ARM64: }
705 long test_InterlockedExchangeAdd_nf(long volatile *value, long mask) {
706 return _InterlockedExchangeAdd_nf(value, mask);
708 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
709 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask monotonic
710 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
711 // CHECK-ARM-ARM64: }
712 __int64 test_InterlockedExchangeAdd64_acq(__int64 volatile *value, __int64 mask) {
713 return _InterlockedExchangeAdd64_acq(value, mask);
715 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
716 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask acquire
717 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
718 // CHECK-ARM-ARM64: }
719 __int64 test_InterlockedExchangeAdd64_rel(__int64 volatile *value, __int64 mask) {
720 return _InterlockedExchangeAdd64_rel(value, mask);
722 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
723 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask release
724 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
725 // CHECK-ARM-ARM64: }
726 __int64 test_InterlockedExchangeAdd64_nf(__int64 volatile *value, __int64 mask) {
727 return _InterlockedExchangeAdd64_nf(value, mask);
729 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
730 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask monotonic
731 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
732 // CHECK-ARM-ARM64: }
734 char test_InterlockedExchange8_acq(char volatile *value, char mask) {
735 return _InterlockedExchange8_acq(value, mask);
737 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
738 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask acquire
739 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
740 // CHECK-ARM-ARM64: }
741 char test_InterlockedExchange8_rel(char volatile *value, char mask) {
742 return _InterlockedExchange8_rel(value, mask);
744 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
745 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask release
746 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
747 // CHECK-ARM-ARM64: }
748 char test_InterlockedExchange8_nf(char volatile *value, char mask) {
749 return _InterlockedExchange8_nf(value, mask);
751 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
752 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask monotonic
753 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
754 // CHECK-ARM-ARM64: }
755 short test_InterlockedExchange16_acq(short volatile *value, short mask) {
756 return _InterlockedExchange16_acq(value, mask);
758 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
759 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask acquire
760 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
761 // CHECK-ARM-ARM64: }
762 short test_InterlockedExchange16_rel(short volatile *value, short mask) {
763 return _InterlockedExchange16_rel(value, mask);
765 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
766 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask release
767 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
768 // CHECK-ARM-ARM64: }
769 short test_InterlockedExchange16_nf(short volatile *value, short mask) {
770 return _InterlockedExchange16_nf(value, mask);
772 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
773 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask monotonic
774 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
775 // CHECK-ARM-ARM64: }
776 long test_InterlockedExchange_acq(long volatile *value, long mask) {
777 return _InterlockedExchange_acq(value, mask);
779 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
780 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask acquire
781 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
782 // CHECK-ARM-ARM64: }
783 long test_InterlockedExchange_rel(long volatile *value, long mask) {
784 return _InterlockedExchange_rel(value, mask);
786 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
787 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask release
788 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
789 // CHECK-ARM-ARM64: }
790 long test_InterlockedExchange_nf(long volatile *value, long mask) {
791 return _InterlockedExchange_nf(value, mask);
793 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
794 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask monotonic
795 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
796 // CHECK-ARM-ARM64: }
797 __int64 test_InterlockedExchange64_acq(__int64 volatile *value, __int64 mask) {
798 return _InterlockedExchange64_acq(value, mask);
800 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
801 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask acquire
802 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
803 // CHECK-ARM-ARM64: }
804 __int64 test_InterlockedExchange64_rel(__int64 volatile *value, __int64 mask) {
805 return _InterlockedExchange64_rel(value, mask);
807 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
808 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask release
809 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
810 // CHECK-ARM-ARM64: }
811 __int64 test_InterlockedExchange64_nf(__int64 volatile *value, __int64 mask) {
812 return _InterlockedExchange64_nf(value, mask);
814 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
815 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask monotonic
816 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
817 // CHECK-ARM-ARM64: }
819 char test_InterlockedCompareExchange8_acq(char volatile *Destination, char Exchange, char Comperand) {
820 return _InterlockedCompareExchange8_acq(Destination, Exchange, Comperand);
822 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_acq(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
823 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange acquire acquire
824 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
825 // CHECK-ARM-ARM64: ret i8 [[RESULT]]
826 // CHECK-ARM-ARM64: }
828 char test_InterlockedCompareExchange8_rel(char volatile *Destination, char Exchange, char Comperand) {
829 return _InterlockedCompareExchange8_rel(Destination, Exchange, Comperand);
831 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_rel(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
832 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange release monotonic
833 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
834 // CHECK-ARM-ARM64: ret i8 [[RESULT]]
835 // CHECK-ARM-ARM64: }
837 char test_InterlockedCompareExchange8_nf(char volatile *Destination, char Exchange, char Comperand) {
838 return _InterlockedCompareExchange8_nf(Destination, Exchange, Comperand);
840 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_nf(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
841 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange monotonic monotonic
842 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
843 // CHECK-ARM-ARM64: ret i8 [[RESULT]]
844 // CHECK-ARM-ARM64: }
846 short test_InterlockedCompareExchange16_acq(short volatile *Destination, short Exchange, short Comperand) {
847 return _InterlockedCompareExchange16_acq(Destination, Exchange, Comperand);
849 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_acq(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
850 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange acquire acquire
851 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
852 // CHECK-ARM-ARM64: ret i16 [[RESULT]]
853 // CHECK-ARM-ARM64: }
855 short test_InterlockedCompareExchange16_rel(short volatile *Destination, short Exchange, short Comperand) {
856 return _InterlockedCompareExchange16_rel(Destination, Exchange, Comperand);
858 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_rel(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
859 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange release monotonic
860 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
861 // CHECK-ARM-ARM64: ret i16 [[RESULT]]
862 // CHECK-ARM-ARM64: }
864 short test_InterlockedCompareExchange16_nf(short volatile *Destination, short Exchange, short Comperand) {
865 return _InterlockedCompareExchange16_nf(Destination, Exchange, Comperand);
867 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_nf(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
868 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange monotonic monotonic
869 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
870 // CHECK-ARM-ARM64: ret i16 [[RESULT]]
871 // CHECK-ARM-ARM64: }
873 long test_InterlockedCompareExchange_acq(long volatile *Destination, long Exchange, long Comperand) {
874 return _InterlockedCompareExchange_acq(Destination, Exchange, Comperand);
876 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_acq(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
877 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange acquire acquire
878 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
879 // CHECK-ARM-ARM64: ret i32 [[RESULT]]
880 // CHECK-ARM-ARM64: }
882 long test_InterlockedCompareExchange_rel(long volatile *Destination, long Exchange, long Comperand) {
883 return _InterlockedCompareExchange_rel(Destination, Exchange, Comperand);
885 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_rel(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
886 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange release monotonic
887 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
888 // CHECK-ARM-ARM64: ret i32 [[RESULT]]
889 // CHECK-ARM-ARM64: }
891 long test_InterlockedCompareExchange_nf(long volatile *Destination, long Exchange, long Comperand) {
892 return _InterlockedCompareExchange_nf(Destination, Exchange, Comperand);
894 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_nf(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
895 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange monotonic monotonic
896 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
897 // CHECK-ARM-ARM64: ret i32 [[RESULT]]
898 // CHECK-ARM-ARM64: }
900 __int64 test_InterlockedCompareExchange64_acq(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
901 return _InterlockedCompareExchange64_acq(Destination, Exchange, Comperand);
903 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_acq(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
904 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange acquire acquire
905 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
906 // CHECK-ARM-ARM64: ret i64 [[RESULT]]
907 // CHECK-ARM-ARM64: }
909 __int64 test_InterlockedCompareExchange64_rel(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
910 return _InterlockedCompareExchange64_rel(Destination, Exchange, Comperand);
912 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_rel(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
913 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange release monotonic
914 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
915 // CHECK-ARM-ARM64: ret i64 [[RESULT]]
916 // CHECK-ARM-ARM64: }
918 __int64 test_InterlockedCompareExchange64_nf(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
919 return _InterlockedCompareExchange64_nf(Destination, Exchange, Comperand);
921 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_nf(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
922 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange monotonic monotonic
923 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
924 // CHECK-ARM-ARM64: ret i64 [[RESULT]]
925 // CHECK-ARM-ARM64: }
927 char test_InterlockedOr8_acq(char volatile *value, char mask) {
928 return _InterlockedOr8_acq(value, mask);
930 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
931 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask acquire
932 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
933 // CHECK-ARM-ARM64: }
935 char test_InterlockedOr8_rel(char volatile *value, char mask) {
936 return _InterlockedOr8_rel(value, mask);
938 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
939 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask release
940 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
941 // CHECK-ARM-ARM64: }
943 char test_InterlockedOr8_nf(char volatile *value, char mask) {
944 return _InterlockedOr8_nf(value, mask);
946 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
947 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask monotonic
948 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
949 // CHECK-ARM-ARM64: }
951 short test_InterlockedOr16_acq(short volatile *value, short mask) {
952 return _InterlockedOr16_acq(value, mask);
954 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
955 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask acquire
956 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
957 // CHECK-ARM-ARM64: }
959 short test_InterlockedOr16_rel(short volatile *value, short mask) {
960 return _InterlockedOr16_rel(value, mask);
962 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
963 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask release
964 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
965 // CHECK-ARM-ARM64: }
967 short test_InterlockedOr16_nf(short volatile *value, short mask) {
968 return _InterlockedOr16_nf(value, mask);
970 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
971 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask monotonic
972 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
973 // CHECK-ARM-ARM64: }
975 long test_InterlockedOr_acq(long volatile *value, long mask) {
976 return _InterlockedOr_acq(value, mask);
978 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
979 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask acquire
980 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
981 // CHECK-ARM-ARM64: }
983 long test_InterlockedOr_rel(long volatile *value, long mask) {
984 return _InterlockedOr_rel(value, mask);
986 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
987 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask release
988 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
989 // CHECK-ARM-ARM64: }
991 long test_InterlockedOr_nf(long volatile *value, long mask) {
992 return _InterlockedOr_nf(value, mask);
994 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
995 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask monotonic
996 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
997 // CHECK-ARM-ARM64: }
999 __int64 test_InterlockedOr64_acq(__int64 volatile *value, __int64 mask) {
1000 return _InterlockedOr64_acq(value, mask);
1002 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
1003 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask acquire
1004 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
1005 // CHECK-ARM-ARM64: }
1007 __int64 test_InterlockedOr64_rel(__int64 volatile *value, __int64 mask) {
1008 return _InterlockedOr64_rel(value, mask);
1010 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
1011 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask release
1012 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
1013 // CHECK-ARM-ARM64: }
1015 __int64 test_InterlockedOr64_nf(__int64 volatile *value, __int64 mask) {
1016 return _InterlockedOr64_nf(value, mask);
1018 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
1019 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask monotonic
1020 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
1021 // CHECK-ARM-ARM64: }
char test_InterlockedXor8_acq(char volatile *value, char mask) {
  return _InterlockedXor8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask acquire
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

char test_InterlockedXor8_rel(char volatile *value, char mask) {
  return _InterlockedXor8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask release
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

char test_InterlockedXor8_nf(char volatile *value, char mask) {
  return _InterlockedXor8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask monotonic
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

short test_InterlockedXor16_acq(short volatile *value, short mask) {
  return _InterlockedXor16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask acquire
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

short test_InterlockedXor16_rel(short volatile *value, short mask) {
  return _InterlockedXor16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask release
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

short test_InterlockedXor16_nf(short volatile *value, short mask) {
  return _InterlockedXor16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask monotonic
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

long test_InterlockedXor_acq(long volatile *value, long mask) {
  return _InterlockedXor_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask acquire
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

long test_InterlockedXor_rel(long volatile *value, long mask) {
  return _InterlockedXor_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask release
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

long test_InterlockedXor_nf(long volatile *value, long mask) {
  return _InterlockedXor_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask monotonic
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedXor64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedXor64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask acquire
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedXor64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedXor64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask release
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedXor64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedXor64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask monotonic
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
char test_InterlockedAnd8_acq(char volatile *value, char mask) {
  return _InterlockedAnd8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask acquire
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

char test_InterlockedAnd8_rel(char volatile *value, char mask) {
  return _InterlockedAnd8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask release
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

char test_InterlockedAnd8_nf(char volatile *value, char mask) {
  return _InterlockedAnd8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask monotonic
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

short test_InterlockedAnd16_acq(short volatile *value, short mask) {
  return _InterlockedAnd16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask acquire
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

short test_InterlockedAnd16_rel(short volatile *value, short mask) {
  return _InterlockedAnd16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask release
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

short test_InterlockedAnd16_nf(short volatile *value, short mask) {
  return _InterlockedAnd16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask monotonic
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

long test_InterlockedAnd_acq(long volatile *value, long mask) {
  return _InterlockedAnd_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask acquire
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

long test_InterlockedAnd_rel(long volatile *value, long mask) {
  return _InterlockedAnd_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask release
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

long test_InterlockedAnd_nf(long volatile *value, long mask) {
  return _InterlockedAnd_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask monotonic
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedAnd64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedAnd64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask acquire
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedAnd64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedAnd64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask release
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedAnd64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedAnd64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask monotonic
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
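
// _InterlockedIncrement returns the incremented value, so the checks below
// expect an atomicrmw add of 1 (with the ordering implied by the suffix)
// followed by an add of 1 to the fetched value.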
short test_InterlockedIncrement16_acq(short volatile *Addend) {
  return _InterlockedIncrement16_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_acq(i16*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 acquire
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedIncrement16_rel(short volatile *Addend) {
  return _InterlockedIncrement16_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_rel(i16*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 release
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedIncrement16_nf(short volatile *Addend) {
  return _InterlockedIncrement16_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_nf(i16*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 monotonic
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedIncrement_acq(long volatile *Addend) {
  return _InterlockedIncrement_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_acq(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 acquire
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedIncrement_rel(long volatile *Addend) {
  return _InterlockedIncrement_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_rel(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 release
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedIncrement_nf(long volatile *Addend) {
  return _InterlockedIncrement_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_nf(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 monotonic
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedIncrement64_acq(__int64 volatile *Addend) {
  return _InterlockedIncrement64_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_acq(i64*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 acquire
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedIncrement64_rel(__int64 volatile *Addend) {
  return _InterlockedIncrement64_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_rel(i64*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 release
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedIncrement64_nf(__int64 volatile *Addend) {
  return _InterlockedIncrement64_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_nf(i64*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 monotonic
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }
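
// _InterlockedDecrement likewise returns the decremented value; here the IR
// is expected to be an atomicrmw sub of 1 followed by an add of -1.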
short test_InterlockedDecrement16_acq(short volatile *Addend) {
  return _InterlockedDecrement16_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_acq(i16*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 acquire
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedDecrement16_rel(short volatile *Addend) {
  return _InterlockedDecrement16_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_rel(i16*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 release
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedDecrement16_nf(short volatile *Addend) {
  return _InterlockedDecrement16_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_nf(i16*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 monotonic
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedDecrement_acq(long volatile *Addend) {
  return _InterlockedDecrement_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_acq(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 acquire
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedDecrement_rel(long volatile *Addend) {
  return _InterlockedDecrement_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_rel(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 release
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedDecrement_nf(long volatile *Addend) {
  return _InterlockedDecrement_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_nf(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 monotonic
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedDecrement64_acq(__int64 volatile *Addend) {
  return _InterlockedDecrement64_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_acq(i64*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 acquire
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedDecrement64_rel(__int64 volatile *Addend) {
  return _InterlockedDecrement64_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_rel(i64*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 release
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedDecrement64_nf(__int64 volatile *Addend) {
  return _InterlockedDecrement64_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_nf(i64*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 monotonic
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }
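
// __fastfail terminates the process immediately; it is expected to lower to a
// noreturn, architecture-specific trap ("udf #251" on ARM, "int 0x29" on
// x86/x64, "brk #0xF003" on ARM64) with the fast-fail code passed in the
// register each platform's convention requires.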
void test__fastfail() {
  __fastfail(42);
}
// CHECK-LABEL: define{{.*}} void @test__fastfail()
// CHECK-ARM: call void asm sideeffect "udf #251", "{r0}"(i32 42) #[[NORETURN:[0-9]+]]
// CHECK-INTEL: call void asm sideeffect "int $$0x29", "{cx}"(i32 42) #[[NORETURN]]
// CHECK-ARM64: call void asm sideeffect "brk #0xF003", "{w0}"(i32 42) #[[NORETURN:[0-9]+]]

// Attributes come last.

// CHECK: attributes #[[NORETURN]] = { noreturn{{.*}} }