- ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes
; RUN: opt %s -S -passes=msan 2>&1 | FileCheck %s
target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
@@ -287,7 +287,7 @@ define <4 x float> @test_x86_avx_cvt_pd2_ps_256(<4 x double> %a0) #0 {
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0:![0-9]+]]
; CHECK: 3:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8:[0-9]+]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.avx.cvt.pd2.ps.256(<4 x double> [[A0:%.*]])
@@ -308,7 +308,7 @@ define <4 x i32> @test_x86_avx_cvt_pd2dq_256(<4 x double> %a0) #0 {
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK: 3:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[RES:%.*]] = call <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double> [[A0:%.*]])
@@ -329,7 +329,7 @@ define <8 x i32> @test_x86_avx_cvt_ps2dq_256(<8 x float> %a0) #0 {
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK: 3:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[RES:%.*]] = call <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float> [[A0:%.*]])
@@ -350,7 +350,7 @@ define <4 x i32> @test_x86_avx_cvtt_pd2dq_256(<4 x double> %a0) #0 {
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK: 3:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[RES:%.*]] = call <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double> [[A0:%.*]])
@@ -371,7 +371,7 @@ define <8 x i32> @test_x86_avx_cvtt_ps2dq_256(<8 x float> %a0) #0 {
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK: 3:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[RES:%.*]] = call <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float> [[A0:%.*]])
@@ -396,7 +396,7 @@ define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) #0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
; CHECK: 5:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 6:
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], i8 7)
@@ -484,7 +484,7 @@ define <32 x i8> @test_x86_avx_ldu_dq_256(ptr %a0) #0 {
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
; CHECK: 5:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 6:
; CHECK-NEXT: [[RES:%.*]] = call <32 x i8> @llvm.x86.avx.ldu.dq.256(ptr [[A0]])
@@ -508,7 +508,7 @@ define <2 x double> @test_x86_avx_maskload_pd(ptr %a0, <2 x i64> %mask) #0 {
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
; CHECK: 4:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.avx.maskload.pd(ptr [[A0:%.*]], <2 x i64> [[MASK:%.*]])
@@ -532,7 +532,7 @@ define <4 x double> @test_x86_avx_maskload_pd_256(ptr %a0, <4 x i64> %mask) #0 {
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
; CHECK: 4:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.maskload.pd.256(ptr [[A0:%.*]], <4 x i64> [[MASK:%.*]])
@@ -556,7 +556,7 @@ define <4 x float> @test_x86_avx_maskload_ps(ptr %a0, <4 x i32> %mask) #0 {
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
; CHECK: 4:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.avx.maskload.ps(ptr [[A0:%.*]], <4 x i32> [[MASK:%.*]])
@@ -580,7 +580,7 @@ define <8 x float> @test_x86_avx_maskload_ps_256(ptr %a0, <8 x i32> %mask) #0 {
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
; CHECK: 4:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.maskload.ps.256(ptr [[A0:%.*]], <8 x i32> [[MASK:%.*]])
@@ -608,7 +608,7 @@ define void @test_x86_avx_maskstore_pd(ptr %a0, <2 x i64> %mask, <2 x double> %a
; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
; CHECK: 6:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 7:
; CHECK-NEXT: call void @llvm.x86.avx.maskstore.pd(ptr [[A0:%.*]], <2 x i64> [[MASK:%.*]], <2 x double> [[A2:%.*]])
@@ -635,7 +635,7 @@ define void @test_x86_avx_maskstore_pd_256(ptr %a0, <4 x i64> %mask, <4 x double
; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
; CHECK: 6:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 7:
; CHECK-NEXT: call void @llvm.x86.avx.maskstore.pd.256(ptr [[A0:%.*]], <4 x i64> [[MASK:%.*]], <4 x double> [[A2:%.*]])
@@ -662,7 +662,7 @@ define void @test_x86_avx_maskstore_ps(ptr %a0, <4 x i32> %mask, <4 x float> %a2
; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
; CHECK: 6:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 7:
; CHECK-NEXT: call void @llvm.x86.avx.maskstore.ps(ptr [[A0:%.*]], <4 x i32> [[MASK:%.*]], <4 x float> [[A2:%.*]])
@@ -689,7 +689,7 @@ define void @test_x86_avx_maskstore_ps_256(ptr %a0, <8 x i32> %mask, <8 x float>
; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
; CHECK: 6:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 7:
; CHECK-NEXT: call void @llvm.x86.avx.maskstore.ps.256(ptr [[A0:%.*]], <8 x i32> [[MASK:%.*]], <8 x float> [[A2:%.*]])
@@ -773,7 +773,7 @@ define i32 @test_x86_avx_movmsk_pd_256(<4 x double> %a0) #0 {
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK: 3:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> [[A0:%.*]])
@@ -794,7 +794,7 @@ define i32 @test_x86_avx_movmsk_ps_256(<8 x float> %a0) #0 {
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK: 3:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> [[A0:%.*]])
@@ -886,7 +886,7 @@ define <4 x double> @test_x86_avx_round_pd_256(<4 x double> %a0) #0 {
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK: 3:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> [[A0:%.*]], i32 7)
@@ -907,7 +907,7 @@ define <8 x float> @test_x86_avx_round_ps_256(<8 x float> %a0) #0 {
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK: 3:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> [[A0:%.*]], i32 7)
@@ -945,7 +945,7 @@ define <2 x double> @test_x86_avx_vpermilvar_pd(<2 x double> %a0, <2 x i64> %a1)
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
; CHECK: 5:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 6:
; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> [[A0:%.*]], <2 x i64> [[A1:%.*]])
@@ -970,7 +970,7 @@ define <4 x double> @test_x86_avx_vpermilvar_pd_256(<4 x double> %a0, <4 x i64>
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
; CHECK: 5:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 6:
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> [[A0:%.*]], <4 x i64> [[A1:%.*]])
@@ -990,7 +990,7 @@ define <4 x double> @test_x86_avx_vpermilvar_pd_256_2(<4 x double> %a0) #0 {
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK: 3:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> [[A0:%.*]], <4 x i64> <i64 2, i64 0, i64 0, i64 2>)
@@ -1013,7 +1013,7 @@ define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) #
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
; CHECK: 5:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 6:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> [[A0:%.*]], <4 x i32> [[A1:%.*]])
@@ -1031,7 +1031,7 @@ define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, ptr %a1) #0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK: 3:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[A2:%.*]] = load <4 x i32>, ptr [[A1:%.*]], align 16
@@ -1046,7 +1046,7 @@ define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, ptr %a1) #0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP1]], [[_MSCMP2]]
; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
; CHECK: 10:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 11:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> [[A0:%.*]], <4 x i32> [[A2]])
@@ -1072,7 +1072,7 @@ define <8 x float> @test_x86_avx_vpermilvar_ps_256(<8 x float> %a0, <8 x i32> %a
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
; CHECK: 5:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 6:
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> [[A0:%.*]], <8 x i32> [[A1:%.*]])
@@ -1348,14 +1348,14 @@ define void @movnt_dq(ptr %p, <2 x i64> %a1) nounwind #0 {
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK: 3:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: store <4 x i64> [[_MSPROP1]], ptr [[TMP7]], align 32
- ; CHECK-NEXT: store <4 x i64> [[A3]], ptr [[P]], align 32, !nontemporal !1
+ ; CHECK-NEXT: store <4 x i64> [[A3]], ptr [[P]], align 32, !nontemporal [[META1:![0-9]+]]
; CHECK-NEXT: ret void
;
%a2 = add <2 x i64 > %a1 , <i64 1 , i64 1 >
@@ -1373,14 +1373,14 @@ define void @movnt_ps(ptr %p, <8 x float> %a) nounwind #0 {
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK: 3:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: store <8 x i32> [[TMP2]], ptr [[TMP7]], align 32
- ; CHECK-NEXT: store <8 x float> [[A:%.*]], ptr [[P]], align 32, !nontemporal !1
+ ; CHECK-NEXT: store <8 x float> [[A:%.*]], ptr [[P]], align 32, !nontemporal [[META1]]
; CHECK-NEXT: ret void
;
tail call void @llvm.x86.avx.movnt.ps.256 (ptr %p , <8 x float > %a ) nounwind
@@ -1399,14 +1399,14 @@ define void @movnt_pd(ptr %p, <4 x double> %a1) nounwind #0 {
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
; CHECK: 3:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]]
+ ; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: store <4 x i64> [[_MSPROP]], ptr [[TMP7]], align 32
- ; CHECK-NEXT: store <4 x double> [[A2]], ptr [[P]], align 32, !nontemporal !1
+ ; CHECK-NEXT: store <4 x double> [[A2]], ptr [[P]], align 32, !nontemporal [[META1]]
; CHECK-NEXT: ret void
;
%a2 = fadd <4 x double > %a1 , <double 0x0 , double 0x0 , double 0x0 , double 0x0 >