Commit 508fd96

[CLANG][AArch64] Add SVE tuple types for mfloat8_t (#112687)
This patch adds scalable vector tuple types for mfloat8_t, according to the ACLE [1].

[1] https://github.com/ARM-software/acle.git
1 parent 18ac017 commit 508fd96

6 files changed: 167 additions, 3 deletions


clang/include/clang/Basic/AArch64SVEACLETypes.def

Lines changed: 6 additions & 0 deletions
@@ -138,6 +138,8 @@ SVE_VECTOR_TYPE_FLOAT("__clang_svfloat64x2_t", "svfloat64x2_t", SveFloat64x2, Sv
 
 SVE_VECTOR_TYPE_BFLOAT("__clang_svbfloat16x2_t", "svbfloat16x2_t", SveBFloat16x2, SveBFloat16x2Ty, 8, 16, 2)
 
+SVE_VECTOR_TYPE_INT("__clang_svmfloat8x2_t", "svmfloat8x2_t", SveMFloat8x2, SveMFloat8x2Ty, 16, 8, 2, false)
+
 //
 // x3
 //
@@ -158,6 +160,8 @@ SVE_VECTOR_TYPE_FLOAT("__clang_svfloat64x3_t", "svfloat64x3_t", SveFloat64x3, Sv
 
 SVE_VECTOR_TYPE_BFLOAT("__clang_svbfloat16x3_t", "svbfloat16x3_t", SveBFloat16x3, SveBFloat16x3Ty, 8, 16, 3)
 
+SVE_VECTOR_TYPE_INT("__clang_svmfloat8x3_t", "svmfloat8x3_t", SveMFloat8x3, SveMFloat8x3Ty, 16, 8, 3, false)
+
 //
 // x4
 //
@@ -178,6 +182,8 @@ SVE_VECTOR_TYPE_FLOAT("__clang_svfloat64x4_t", "svfloat64x4_t", SveFloat64x4, Sv
 
 SVE_VECTOR_TYPE_BFLOAT("__clang_svbfloat16x4_t", "svbfloat16x4_t", SveBFloat16x4, SveBFloat16x4Ty, 8, 16, 4)
 
+SVE_VECTOR_TYPE_INT("__clang_svmfloat8x4_t", "svmfloat8x4_t", SveMFloat8x4, SveMFloat8x4Ty, 16, 8, 4, false)
+
 SVE_PREDICATE_TYPE_ALL("__SVBool_t", "__SVBool_t", SveBool, SveBoolTy, 16, 1)
 SVE_PREDICATE_TYPE_ALL("__clang_svboolx2_t", "svboolx2_t", SveBoolx2, SveBoolx2Ty, 16, 2)
 SVE_PREDICATE_TYPE_ALL("__clang_svboolx4_t", "svboolx4_t", SveBoolx4, SveBoolx4Ty, 16, 4)

clang/include/clang/Serialization/ASTBitCodes.h

Lines changed: 1 addition & 1 deletion
@@ -1149,7 +1149,7 @@ enum PredefinedTypeIDs {
 ///
 /// Type IDs for non-predefined types will start at
 /// NUM_PREDEF_TYPE_IDs.
-const unsigned NUM_PREDEF_TYPE_IDS = 506;
+const unsigned NUM_PREDEF_TYPE_IDS = 509;
 
 // Ensure we do not overrun the predefined types we reserved
 // in the enum PredefinedTypeIDs above.
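The bump from 506 to 509 accounts for the three new predefined tuple types (x2, x3, x4): every predefined builtin type occupies a fixed slot in the serialized AST, so the reserved count must grow with them. A rough sketch of the kind of guard the comment above refers to (illustrative only; the exact assertion text and limit live in ASTBitCodes.h):

// Illustrative sketch, not the verbatim source: keep the number of
// predefined type IDs within the block reserved ahead of user type IDs.
static_assert(NUM_PREDEF_TYPE_IDS <= /*reserved block size*/ 512,
              "predefined type IDs overran the reserved range");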

clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp

Lines changed: 85 additions & 1 deletion
@@ -59,6 +59,9 @@ void f(__clang_svbfloat16x3_t, __clang_svbfloat16x3_t);
 void f(__clang_svbfloat16x4_t, __clang_svbfloat16x4_t);
 void f(__clang_svboolx2_t, __clang_svboolx2_t);
 void f(__clang_svboolx4_t, __clang_svboolx4_t);
+void f(__clang_svmfloat8x2_t, __clang_svmfloat8x2_t);
+void f(__clang_svmfloat8x3_t, __clang_svmfloat8x3_t);
+void f(__clang_svmfloat8x4_t, __clang_svmfloat8x4_t);
 
 // CHECK-LABEL: define dso_local void @_Z3foov(
 // CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
@@ -139,6 +142,12 @@ void f(__clang_svboolx4_t, __clang_svboolx4_t);
 // CHECK-NEXT: [[COERCE73:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
 // CHECK-NEXT: [[COERCE74:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
 // CHECK-NEXT: [[COERCE75:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
+// CHECK-NEXT: [[COERCE76:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[COERCE77:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[COERCE78:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[COERCE79:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[COERCE80:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT: [[COERCE81:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
 // CHECK-NEXT: call void @_Z1fu10__SVInt8_tS_(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> zeroinitializer)
 // CHECK-NEXT: call void @_Z1fu11__SVInt16_tS_(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> zeroinitializer)
 // CHECK-NEXT: call void @_Z1fu11__SVInt16_tS_(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> zeroinitializer)
@@ -151,7 +160,7 @@ void f(__clang_svboolx4_t, __clang_svboolx4_t);
 // CHECK-NEXT: call void @_Z1fu13__SVFloat16_tS_(<vscale x 8 x half> zeroinitializer, <vscale x 8 x half> zeroinitializer)
 // CHECK-NEXT: call void @_Z1fu13__SVFloat32_tS_(<vscale x 4 x float> zeroinitializer, <vscale x 4 x float> zeroinitializer)
 // CHECK-NEXT: call void @_Z1fu13__SVFloat64_tS_(<vscale x 2 x double> zeroinitializer, <vscale x 2 x double> zeroinitializer)
-// CHECK-NEXT: call void @_Z1fu13__SVMfloat8_tS_(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> zeroinitializer)
+// CHECK-NEXT: call void @_Z1fu13__SVMfloat8_tS_(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> zeroinitializer)
 // CHECK-NEXT: call void @_Z1fu14__SVBfloat16_tS_(<vscale x 8 x bfloat> zeroinitializer, <vscale x 8 x bfloat> zeroinitializer)
 // CHECK-NEXT: call void @_Z1fu10__SVBool_tS_(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1> zeroinitializer)
 // CHECK-NEXT: call void @_Z1fu11__SVCount_tS_(target("aarch64.svcount") zeroinitializer, target("aarch64.svcount") zeroinitializer)
@@ -573,6 +582,39 @@ void f(__clang_svboolx4_t, __clang_svboolx4_t);
 // CHECK-NEXT: [[COERCE75_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> } [[COERCE75_TUPLE]], 2
 // CHECK-NEXT: [[COERCE75_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> } [[COERCE75_TUPLE]], 3
 // CHECK-NEXT: call void @_Z1f10svboolx4_tS_(<vscale x 16 x i1> [[COERCE74_EXTRACT0]], <vscale x 16 x i1> [[COERCE74_EXTRACT1]], <vscale x 16 x i1> [[COERCE74_EXTRACT2]], <vscale x 16 x i1> [[COERCE74_EXTRACT3]], <vscale x 16 x i1> [[COERCE75_EXTRACT0]], <vscale x 16 x i1> [[COERCE75_EXTRACT1]], <vscale x 16 x i1> [[COERCE75_EXTRACT2]], <vscale x 16 x i1> [[COERCE75_EXTRACT3]])
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE76]], align 16
+// CHECK-NEXT: [[COERCE76_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE76]], align 16
+// CHECK-NEXT: [[COERCE76_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE76_TUPLE]], 0
+// CHECK-NEXT: [[COERCE76_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE76_TUPLE]], 1
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE77]], align 16
+// CHECK-NEXT: [[COERCE77_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE77]], align 16
+// CHECK-NEXT: [[COERCE77_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE77_TUPLE]], 0
+// CHECK-NEXT: [[COERCE77_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE77_TUPLE]], 1
+// CHECK-NEXT: call void @_Z1f13svmfloat8x2_tS_(<vscale x 16 x i8> [[COERCE76_EXTRACT0]], <vscale x 16 x i8> [[COERCE76_EXTRACT1]], <vscale x 16 x i8> [[COERCE77_EXTRACT0]], <vscale x 16 x i8> [[COERCE77_EXTRACT1]])
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE78]], align 16
+// CHECK-NEXT: [[COERCE78_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE78]], align 16
+// CHECK-NEXT: [[COERCE78_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 0
+// CHECK-NEXT: [[COERCE78_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 1
+// CHECK-NEXT: [[COERCE78_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 2
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE79]], align 16
+// CHECK-NEXT: [[COERCE79_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE79]], align 16
+// CHECK-NEXT: [[COERCE79_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 0
+// CHECK-NEXT: [[COERCE79_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 1
+// CHECK-NEXT: [[COERCE79_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 2
+// CHECK-NEXT: call void @_Z1f13svmfloat8x3_tS_(<vscale x 16 x i8> [[COERCE78_EXTRACT0]], <vscale x 16 x i8> [[COERCE78_EXTRACT1]], <vscale x 16 x i8> [[COERCE78_EXTRACT2]], <vscale x 16 x i8> [[COERCE79_EXTRACT0]], <vscale x 16 x i8> [[COERCE79_EXTRACT1]], <vscale x 16 x i8> [[COERCE79_EXTRACT2]])
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE80]], align 16
+// CHECK-NEXT: [[COERCE80_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE80]], align 16
+// CHECK-NEXT: [[COERCE80_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 0
+// CHECK-NEXT: [[COERCE80_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 1
+// CHECK-NEXT: [[COERCE80_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 2
+// CHECK-NEXT: [[COERCE80_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 3
+// CHECK-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE81]], align 16
+// CHECK-NEXT: [[COERCE81_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE81]], align 16
+// CHECK-NEXT: [[COERCE81_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 0
+// CHECK-NEXT: [[COERCE81_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 1
+// CHECK-NEXT: [[COERCE81_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 2
+// CHECK-NEXT: [[COERCE81_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 3
+// CHECK-NEXT: call void @_Z1f13svmfloat8x4_tS_(<vscale x 16 x i8> [[COERCE80_EXTRACT0]], <vscale x 16 x i8> [[COERCE80_EXTRACT1]], <vscale x 16 x i8> [[COERCE80_EXTRACT2]], <vscale x 16 x i8> [[COERCE80_EXTRACT3]], <vscale x 16 x i8> [[COERCE81_EXTRACT0]], <vscale x 16 x i8> [[COERCE81_EXTRACT1]], <vscale x 16 x i8> [[COERCE81_EXTRACT2]], <vscale x 16 x i8> [[COERCE81_EXTRACT3]])
 // CHECK-NEXT: ret void
 //
 // COMPAT_17-LABEL: define dso_local void @_Z3foov(
@@ -654,6 +696,12 @@ void f(__clang_svboolx4_t, __clang_svboolx4_t);
 // COMPAT_17-NEXT: [[COERCE73:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
 // COMPAT_17-NEXT: [[COERCE74:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
 // COMPAT_17-NEXT: [[COERCE75:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
+// COMPAT_17-NEXT: [[COERCE76:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// COMPAT_17-NEXT: [[COERCE77:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// COMPAT_17-NEXT: [[COERCE78:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// COMPAT_17-NEXT: [[COERCE79:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// COMPAT_17-NEXT: [[COERCE80:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// COMPAT_17-NEXT: [[COERCE81:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
 // COMPAT_17-NEXT: call void @_Z1fu10__SVInt8_tu10__SVInt8_t(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> zeroinitializer)
 // COMPAT_17-NEXT: call void @_Z1fu11__SVInt16_tu11__SVInt16_t(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> zeroinitializer)
 // COMPAT_17-NEXT: call void @_Z1fu11__SVInt16_tu11__SVInt16_t(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> zeroinitializer)
@@ -1088,6 +1136,39 @@ void f(__clang_svboolx4_t, __clang_svboolx4_t);
 // COMPAT_17-NEXT: [[COERCE75_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> } [[COERCE75_TUPLE]], 2
 // COMPAT_17-NEXT: [[COERCE75_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> } [[COERCE75_TUPLE]], 3
 // COMPAT_17-NEXT: call void @_Z1f10svboolx4_t10svboolx4_t(<vscale x 16 x i1> [[COERCE74_EXTRACT0]], <vscale x 16 x i1> [[COERCE74_EXTRACT1]], <vscale x 16 x i1> [[COERCE74_EXTRACT2]], <vscale x 16 x i1> [[COERCE74_EXTRACT3]], <vscale x 16 x i1> [[COERCE75_EXTRACT0]], <vscale x 16 x i1> [[COERCE75_EXTRACT1]], <vscale x 16 x i1> [[COERCE75_EXTRACT2]], <vscale x 16 x i1> [[COERCE75_EXTRACT3]])
+// COMPAT_17-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE76]], align 16
+// COMPAT_17-NEXT: [[COERCE76_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE76]], align 16
+// COMPAT_17-NEXT: [[COERCE76_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE76_TUPLE]], 0
+// COMPAT_17-NEXT: [[COERCE76_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE76_TUPLE]], 1
+// COMPAT_17-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE77]], align 16
+// COMPAT_17-NEXT: [[COERCE77_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE77]], align 16
+// COMPAT_17-NEXT: [[COERCE77_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE77_TUPLE]], 0
+// COMPAT_17-NEXT: [[COERCE77_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE77_TUPLE]], 1
+// COMPAT_17-NEXT: call void @_Z1f13svmfloat8x2_t13svmfloat8x2_t(<vscale x 16 x i8> [[COERCE76_EXTRACT0]], <vscale x 16 x i8> [[COERCE76_EXTRACT1]], <vscale x 16 x i8> [[COERCE77_EXTRACT0]], <vscale x 16 x i8> [[COERCE77_EXTRACT1]])
+// COMPAT_17-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE78]], align 16
+// COMPAT_17-NEXT: [[COERCE78_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE78]], align 16
+// COMPAT_17-NEXT: [[COERCE78_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 0
+// COMPAT_17-NEXT: [[COERCE78_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 1
+// COMPAT_17-NEXT: [[COERCE78_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 2
+// COMPAT_17-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE79]], align 16
+// COMPAT_17-NEXT: [[COERCE79_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE79]], align 16
+// COMPAT_17-NEXT: [[COERCE79_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 0
+// COMPAT_17-NEXT: [[COERCE79_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 1
+// COMPAT_17-NEXT: [[COERCE79_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 2
+// COMPAT_17-NEXT: call void @_Z1f13svmfloat8x3_t13svmfloat8x3_t(<vscale x 16 x i8> [[COERCE78_EXTRACT0]], <vscale x 16 x i8> [[COERCE78_EXTRACT1]], <vscale x 16 x i8> [[COERCE78_EXTRACT2]], <vscale x 16 x i8> [[COERCE79_EXTRACT0]], <vscale x 16 x i8> [[COERCE79_EXTRACT1]], <vscale x 16 x i8> [[COERCE79_EXTRACT2]])
+// COMPAT_17-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE80]], align 16
+// COMPAT_17-NEXT: [[COERCE80_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE80]], align 16
+// COMPAT_17-NEXT: [[COERCE80_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 0
+// COMPAT_17-NEXT: [[COERCE80_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 1
+// COMPAT_17-NEXT: [[COERCE80_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 2
+// COMPAT_17-NEXT: [[COERCE80_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 3
+// COMPAT_17-NEXT: store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE81]], align 16
+// COMPAT_17-NEXT: [[COERCE81_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE81]], align 16
+// COMPAT_17-NEXT: [[COERCE81_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 0
+// COMPAT_17-NEXT: [[COERCE81_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 1
+// COMPAT_17-NEXT: [[COERCE81_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 2
+// COMPAT_17-NEXT: [[COERCE81_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 3
+// COMPAT_17-NEXT: call void @_Z1f13svmfloat8x4_t13svmfloat8x4_t(<vscale x 16 x i8> [[COERCE80_EXTRACT0]], <vscale x 16 x i8> [[COERCE80_EXTRACT1]], <vscale x 16 x i8> [[COERCE80_EXTRACT2]], <vscale x 16 x i8> [[COERCE80_EXTRACT3]], <vscale x 16 x i8> [[COERCE81_EXTRACT0]], <vscale x 16 x i8> [[COERCE81_EXTRACT1]], <vscale x 16 x i8> [[COERCE81_EXTRACT2]], <vscale x 16 x i8> [[COERCE81_EXTRACT3]])
 // COMPAT_17-NEXT: ret void
 //
 void foo() {
@@ -1146,4 +1227,7 @@ void foo() {
 f(__clang_svbfloat16x4_t(), __clang_svbfloat16x4_t());
 f(__clang_svboolx2_t(), __clang_svboolx2_t());
 f(__clang_svboolx4_t(), __clang_svboolx4_t());
+f(__clang_svmfloat8x2_t(), __clang_svmfloat8x2_t());
+f(__clang_svmfloat8x3_t(), __clang_svmfloat8x3_t());
+f(__clang_svmfloat8x4_t(), __clang_svmfloat8x4_t());
 }
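Reading the new CHECK lines together: under the current ABI the tuple overloads mangle with the Itanium substitution (S_) for the repeated parameter, while the clang-17 compatibility run (the COMPAT_17 prefix, presumably driven by -fclang-abi-compat=17 in the RUN lines, which are not shown here) spells the second parameter out in full. A small annotated sketch, with the symbol names taken verbatim from the test:

// Assumes an AArch64 target with SVE; the expected mangled names are noted
// as comments next to each overload.
void f(__clang_svmfloat8x2_t, __clang_svmfloat8x2_t); // _Z1f13svmfloat8x2_tS_
void f(__clang_svmfloat8x3_t, __clang_svmfloat8x3_t); // _Z1f13svmfloat8x3_tS_
void f(__clang_svmfloat8x4_t, __clang_svmfloat8x4_t); // _Z1f13svmfloat8x4_tS_
// In the clang-17 compatibility mode the substitution is not used, so the
// second parameter repeats, e.g. _Z1f13svmfloat8x2_t13svmfloat8x2_t.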
