@@ -267,19 +267,19 @@ def RetCC_X86Common : CallingConv<[
   // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
   // can only be used by ABI non-compliant code. If the target doesn't have XMM
   // registers, it won't have vector types.
-  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
             CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
 
   // 256-bit vectors are returned in YMM0 and XMM1, when they fit. YMM2 and YMM3
   // can only be used by ABI non-compliant code. This vector type is only
   // supported while using the AVX target feature.
-  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
             CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,
 
   // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and ZMM3
   // can only be used by ABI non-compliant code. This vector type is only
   // supported while using the AVX-512 target feature.
-  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
             CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,
 
   // Long double types are always returned in FP0 (even with SSE),
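To illustrate what the new `v8bf16` return-value entry means at the source level, here is a minimal sketch (not part of this patch), assuming a Clang build where `__bf16` and the GNU `vector_size` extension are available on an SSE2-capable x86-64 target: a 128-bit bf16 vector return now takes the same path as `v8f16` and comes back in XMM0.

```c
/* Hypothetical illustration; assumes Clang with __bf16 and
   __attribute__((vector_size)) support on x86-64 with SSE2. */
typedef __bf16 v8bf16 __attribute__((vector_size(16)));  /* 8 x bf16 = 128 bits */

/* With v8bf16 listed in RetCC_X86Common, this value is returned in XMM0
   (further vector return values would use XMM1). */
v8bf16 splat_bf16(__bf16 x) {
  v8bf16 v = {x, x, x, x, x, x, x, x};
  return v;
}
```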
@@ -565,7 +565,7 @@ def CC_X86_64_C : CallingConv<[
   CCIfType<[v64i1], CCPromoteToType<v64i8>>,
 
   // The first 8 FP/Vector arguments are passed in XMM registers.
-  CCIfType<[f16, f32, f64, f128, v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+  CCIfType<[f16, f32, f64, f128, v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
             CCIfSubtarget<"hasSSE1()",
             CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,
 
@@ -574,13 +574,13 @@ def CC_X86_64_C : CallingConv<[
   // FIXME: This isn't precisely correct; the x86-64 ABI document says that
   // fixed arguments to vararg functions are supposed to be passed in
   // registers. Actually modeling that would be a lot of work, though.
-  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
                           CCIfSubtarget<"hasAVX()",
                           CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
                                          YMM4, YMM5, YMM6, YMM7]>>>>,
 
   // The first 8 512-bit vector arguments are passed in ZMM registers.
-  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
                           CCIfSubtarget<"hasAVX512()",
                           CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7]>>>>,
 
@@ -593,14 +593,14 @@ def CC_X86_64_C : CallingConv<[
   CCIfType<[f80, f128], CCAssignToStack<0, 0>>,
 
   // Vectors get 16-byte stack slots that are 16-byte aligned.
-  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64], CCAssignToStack<16, 16>>,
+  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64], CCAssignToStack<16, 16>>,
 
   // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
-  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
            CCAssignToStack<32, 32>>,
 
   // 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
-  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
            CCAssignToStack<64, 64>>
 ]>;
 
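Taken together, the CC_X86_64_C hunks mean bf16 vector arguments follow the existing f16/f32 vector rules: the first 8 FP/vector arguments go in XMM0..XMM7, and overflow arguments get 16-byte-aligned stack slots. A hypothetical sketch, reusing the same Clang `v8bf16` typedef assumed in the earlier example:

```c
/* Hypothetical illustration; assumes the same Clang __bf16 vector support. */
typedef __bf16 v8bf16 __attribute__((vector_size(16)));

/* Under CC_X86_64_C, arguments a..h are passed in XMM0..XMM7; the ninth no
   longer fits in a register and is placed in a 16-byte stack slot with
   16-byte alignment, exactly as the v8f16/v4f32 cases already were. */
__bf16 first_lane_of_ninth(v8bf16 a, v8bf16 b, v8bf16 c, v8bf16 d,
                           v8bf16 e, v8bf16 f, v8bf16 g, v8bf16 h,
                           v8bf16 i) {
  return i[0];
}
```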
@@ -631,13 +631,13 @@ def CC_X86_Win64_C : CallingConv<[
   CCIfCFGuardTarget<CCAssignToReg<[RAX]>>,
 
   // 128 bit vectors are passed by pointer
-  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64], CCPassIndirect<i64>>,
+  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64], CCPassIndirect<i64>>,
 
   // 256 bit vectors are passed by pointer
-  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64], CCPassIndirect<i64>>,
+  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64], CCPassIndirect<i64>>,
 
   // 512 bit vectors are passed by pointer
-  CCIfType<[v64i8, v32i16, v16i32, v32f16, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,
+  CCIfType<[v64i8, v32i16, v16i32, v32f16, v32bf16, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,
 
   // Long doubles are passed by pointer
   CCIfType<[f80], CCPassIndirect<i64>>,
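On Win64 the convention is different: bf16 vectors, like the other 128/256/512-bit vector types in these lists, are not copied into XMM/YMM/ZMM registers but are passed indirectly (`CCPassIndirect<i64>`). A hypothetical sketch, assuming Clang targeting x86_64-pc-windows-msvc with the same `__bf16` vector support:

```c
/* Hypothetical illustration; assumes Clang targeting x86_64-pc-windows-msvc
   with __bf16 vector support. */
typedef __bf16 v8bf16 __attribute__((vector_size(16)));

/* Under CC_X86_Win64_C, the caller materializes a copy of the v8bf16 argument
   and passes its address as an i64 (in an integer register or stack slot),
   the same way v8f16 and v4f32 arguments are already handled. */
__bf16 first_lane(v8bf16 v) {
  return v[0];
}
```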
@@ -734,48 +734,48 @@ def CC_X86_64_AnyReg : CallingConv<[
 /// values are spilled on the stack.
 def CC_X86_32_Vector_Common : CallingConv<[
   // Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
-  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
            CCAssignToStack<16, 16>>,
 
   // 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
-  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
            CCAssignToStack<32, 32>>,
 
   // 512-bit AVX 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
-  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
            CCAssignToStack<64, 64>>
 ]>;
 
 /// CC_X86_Win32_Vector - In X86 Win32 calling conventions, extra vector
 /// values are spilled on the stack.
 def CC_X86_Win32_Vector : CallingConv<[
   // Other SSE vectors get 16-byte stack slots that are 4-byte aligned.
-  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
            CCAssignToStack<16, 4>>,
 
   // 256-bit AVX vectors get 32-byte stack slots that are 4-byte aligned.
-  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
            CCAssignToStack<32, 4>>,
 
   // 512-bit AVX 512-bit vectors get 64-byte stack slots that are 4-byte aligned.
-  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
            CCAssignToStack<64, 4>>
 ]>;
 
 // CC_X86_32_Vector_Standard - The first 3 vector arguments are passed in
 // vector registers
 def CC_X86_32_Vector_Standard : CallingConv<[
   // SSE vector arguments are passed in XMM registers.
-  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
                 CCAssignToReg<[XMM0, XMM1, XMM2]>>>,
 
   // AVX 256-bit vector arguments are passed in YMM registers.
-  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
                 CCIfSubtarget<"hasAVX()",
                 CCAssignToReg<[YMM0, YMM1, YMM2]>>>>,
 
   // AVX 512-bit vector arguments are passed in ZMM registers.
-  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
                 CCAssignToReg<[ZMM0, ZMM1, ZMM2]>>>,
 
   CCIfIsVarArgOnWin<CCDelegateTo<CC_X86_Win32_Vector>>,
@@ -786,16 +786,16 @@ def CC_X86_32_Vector_Standard : CallingConv<[
 // vector registers.
 def CC_X86_32_Vector_Darwin : CallingConv<[
   // SSE vector arguments are passed in XMM registers.
-  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
                 CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,
 
   // AVX 256-bit vector arguments are passed in YMM registers.
-  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
                 CCIfSubtarget<"hasAVX()",
                 CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,
 
   // AVX 512-bit vector arguments are passed in ZMM registers.
-  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
                 CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>>,
 
   CCDelegateTo<CC_X86_32_Vector_Common>
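For the 32-bit conventions, the effect is that non-vararg bf16 vector arguments now ride in XMM registers like their f16 counterparts: the first 3 in XMM0..XMM2 under CC_X86_32_Vector_Standard (4, XMM0..XMM3, under the Darwin variant), with any extras spilled through CC_X86_32_Vector_Common (or CC_X86_Win32_Vector on Win32, where the slots are only 4-byte aligned). A hypothetical sketch, assuming Clang targeting i386 with SSE2 and the same `__bf16` vector support:

```c
/* Hypothetical illustration; assumes Clang targeting i386 with SSE2 enabled
   and __bf16 vector support. */
typedef __bf16 v8bf16 __attribute__((vector_size(16)));

/* Under CC_X86_32_Vector_Standard, a, b and c travel in XMM0..XMM2; d is
   spilled to a stack slot by CC_X86_32_Vector_Common (16-byte aligned, or
   4-byte aligned on Win32 via CC_X86_Win32_Vector). */
__bf16 first_lane_of_fourth(v8bf16 a, v8bf16 b, v8bf16 c, v8bf16 d) {
  return d[0];
}
```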