[RISCV] Use _B* suffix for vector mask logic pseudo instructions. #119787
Conversation
@llvm/pr-subscribers-llvm-globalisel

Author: Craig Topper (topperc)

Changes

Replace LMUL suffixes with _B1, _B2, etc. This matches what we do for other mask only instructions like VCPOP_M, VFIRST_M, VMSBF_M, VLM, VSM, etc.

Now all pseudoinstructions that use Log2SEW=0 will be consistently named.

Stacked on #119785

Patch is 31.24 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/119787.diff

10 Files Affected:
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index c3922e38729dc3..f43c120dc1946a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1670,16 +1670,16 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
: RISCV::PseudoVMSLT_VX_##suffix; \
VMSGTOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix \
: RISCV::PseudoVMSGT_VX_##suffix; \
- VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \
+ VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix_b; \
VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b; \
break;
- CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
- CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
- CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B64)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B32)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B16)
CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
- CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
- CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
- CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B4)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B2)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B1)
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
}
SDValue SEW = CurDAG->getTargetConstant(
@@ -1751,13 +1751,13 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
VMSGTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix##_MASK \
: RISCV::PseudoVMSGT_VX_##suffix##_MASK; \
break;
- CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
- CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
- CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
+ CASE_VMSLT_OPCODES(LMUL_F8, MF8, B64)
+ CASE_VMSLT_OPCODES(LMUL_F4, MF4, B32)
+ CASE_VMSLT_OPCODES(LMUL_F2, MF2, B16)
CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
- CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
- CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
- CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
+ CASE_VMSLT_OPCODES(LMUL_2, M2, B4)
+ CASE_VMSLT_OPCODES(LMUL_4, M4, B2)
+ CASE_VMSLT_OPCODES(LMUL_8, M8, B1)
#undef CASE_VMSLT_OPCODES
}
// Mask operations use the LMUL from the mask type.
@@ -1770,13 +1770,13 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix; \
VMOROpcode = RISCV::PseudoVMOR_MM_##suffix; \
break;
- CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
- CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
- CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
- CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
- CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
- CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
- CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
+ CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, B64)
+ CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, B32)
+ CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, B16)
+ CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, B8)
+ CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, B4)
+ CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, B2)
+ CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, B1)
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
}
SDValue SEW = CurDAG->getTargetConstant(
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 6c4e41711440e6..5af0dd14de2a81 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -415,13 +415,13 @@ class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
defset list<MTypeInfo> AllMasks = {
// vbool<n>_t, <n> = SEW/LMUL, we assume SEW=8 and corresponding LMUL.
- def : MTypeInfo<vbool64_t, V_MF8, "B1">;
- def : MTypeInfo<vbool32_t, V_MF4, "B2">;
- def : MTypeInfo<vbool16_t, V_MF2, "B4">;
+ def : MTypeInfo<vbool64_t, V_MF8, "B64">;
+ def : MTypeInfo<vbool32_t, V_MF4, "B32">;
+ def : MTypeInfo<vbool16_t, V_MF2, "B16">;
def : MTypeInfo<vbool8_t, V_M1, "B8">;
- def : MTypeInfo<vbool4_t, V_M2, "B16">;
- def : MTypeInfo<vbool2_t, V_M4, "B32">;
- def : MTypeInfo<vbool1_t, V_M8, "B64">;
+ def : MTypeInfo<vbool4_t, V_M2, "B4">;
+ def : MTypeInfo<vbool2_t, V_M4, "B2">;
+ def : MTypeInfo<vbool1_t, V_M8, "B1">;
}
class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti> {
@@ -2266,11 +2266,10 @@ multiclass VPseudoBinaryV_VI_RM<Operand ImmType, LMULInfo m, string Constraint =
}
multiclass VPseudoVALU_MM<bit Commutable = 0> {
- foreach m = MxList in {
- defvar mx = m.MX;
- let VLMul = m.value, isCommutable = Commutable in {
- def "_MM_" # mx : VPseudoBinaryNoMask<VR, VR, VR, "">,
- SchedBinary<"WriteVMALUV", "ReadVMALUV", "ReadVMALUV", mx>;
+ foreach mti = AllMasks in {
+ let VLMul = mti.LMul.value, isCommutable = Commutable in {
+ def "_MM_" # mti.BX : VPseudoBinaryNoMask<VR, VR, VR, "">,
+ SchedBinary<"WriteVMALUV", "ReadVMALUV", "ReadVMALUV", mti.LMul.MX>;
}
}
}
@@ -4943,7 +4942,7 @@ multiclass VPatBinaryV_VI_RM<string intrinsic, string instruction,
multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
foreach mti = AllMasks in
let Predicates = [HasVInstructions] in
- def : VPatBinaryM<intrinsic, instruction # "_MM_" # mti.LMul.MX,
+ def : VPatBinaryM<intrinsic, instruction # "_MM_" # mti.BX,
mti.Mask, mti.Mask, mti.Mask,
mti.Log2SEW, VR, VR>;
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 021c4b3b724b02..880ea0ae0a976c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1141,35 +1141,35 @@ defm : VPatAVGADD_VV_VX_RM<avgceilu, 0b00, suffix = "U">;
foreach mti = AllMasks in {
let Predicates = [HasVInstructions] in {
def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
- (!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMAND_MM_"#mti.BX)
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
- (!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMOR_MM_"#mti.BX)
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
- (!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMXOR_MM_"#mti.BX)
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (rvv_vnot (and VR:$rs1, VR:$rs2))),
- (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMNAND_MM_"#mti.BX)
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (rvv_vnot (or VR:$rs1, VR:$rs2))),
- (!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMNOR_MM_"#mti.BX)
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (rvv_vnot (xor VR:$rs1, VR:$rs2))),
- (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.BX)
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))),
- (!cast<Instruction>("PseudoVMANDN_MM_"#mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMANDN_MM_"#mti.BX)
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))),
- (!cast<Instruction>("PseudoVMORN_MM_"#mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMORN_MM_"#mti.BX)
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
// Handle rvv_vnot the same as the vmnot.m pseudoinstruction.
def : Pat<(mti.Mask (rvv_vnot VR:$rs)),
- (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMNAND_MM_"#mti.BX)
VR:$rs, VR:$rs, mti.AVL, mti.Log2SEW)>;
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index e48a6f9309294b..2026ba79e623d8 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -2699,51 +2699,51 @@ foreach mti = AllMasks in {
(!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)),
- (!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMAND_MM_" # mti.BX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
- (!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMOR_MM_" # mti.BX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
- (!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMXOR_MM_" # mti.BX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1,
(riscv_vmnot_vl VR:$rs2, VLOpFrag),
VLOpFrag)),
- (!cast<Instruction>("PseudoVMANDN_MM_" # mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMANDN_MM_" # mti.BX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1,
(riscv_vmnot_vl VR:$rs2, VLOpFrag),
VLOpFrag)),
- (!cast<Instruction>("PseudoVMORN_MM_" # mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMORN_MM_" # mti.BX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
// XOR is associative so we need 2 patterns for VMXNOR.
def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
VLOpFrag),
VR:$rs2, VLOpFrag)),
- (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.BX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2,
VLOpFrag),
VLOpFrag)),
- (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMNAND_MM_" # mti.BX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2,
VLOpFrag),
VLOpFrag)),
- (!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMNOR_MM_" # mti.BX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2,
VLOpFrag),
VLOpFrag)),
- (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.BX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
// Match the not idiom to the vmnot.m pseudo.
def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)),
- (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMNAND_MM_" # mti.BX)
VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;
// 15.2 Vector count population in mask vcpop.m
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/render-vlop-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/render-vlop-rv32.mir
index 5600e351aa3987..7610ebe7ed026b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/render-vlop-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/render-vlop-rv32.mir
@@ -11,8 +11,8 @@ body: |
bb.1:
; CHECK-LABEL: name: negative_vl
; CHECK: [[ADDI:%[0-9]+]]:gprnox0 = ADDI $x0, -2
- ; CHECK-NEXT: [[PseudoVMCLR_M_B1_:%[0-9]+]]:vr = PseudoVMCLR_M_B1 [[ADDI]], 0 /* e8 */
- ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B1_]]
+ ; CHECK-NEXT: [[PseudoVMCLR_M_B64_:%[0-9]+]]:vr = PseudoVMCLR_M_B64 [[ADDI]], 0 /* e8 */
+ ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B64_]]
; CHECK-NEXT: PseudoRET implicit $v0
%0:gprb(s32) = G_CONSTANT i32 -2
%1:vrb(<vscale x 1 x s1>) = G_VMCLR_VL %0(s32)
@@ -31,8 +31,8 @@ body: |
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
- ; CHECK-NEXT: [[PseudoVMCLR_M_B1_:%[0-9]+]]:vr = PseudoVMCLR_M_B1 [[COPY]], 0 /* e8 */
- ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B1_]]
+ ; CHECK-NEXT: [[PseudoVMCLR_M_B64_:%[0-9]+]]:vr = PseudoVMCLR_M_B64 [[COPY]], 0 /* e8 */
+ ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B64_]]
; CHECK-NEXT: PseudoRET implicit $v0
%0:gprb(s32) = COPY $x10
%1:vrb(<vscale x 1 x s1>) = G_VMCLR_VL %0(s32)
@@ -48,8 +48,8 @@ tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: nonzero_vl
- ; CHECK: [[PseudoVMCLR_M_B1_:%[0-9]+]]:vr = PseudoVMCLR_M_B1 1, 0 /* e8 */
- ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B1_]]
+ ; CHECK: [[PseudoVMCLR_M_B64_:%[0-9]+]]:vr = PseudoVMCLR_M_B64 1, 0 /* e8 */
+ ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B64_]]
; CHECK-NEXT: PseudoRET implicit $v0
%0:gprb(s32) = G_CONSTANT i32 1
%1:vrb(<vscale x 1 x s1>) = G_VMCLR_VL %0(s32)
@@ -65,8 +65,8 @@ tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: zero_vl
- ; CHECK: [[PseudoVMCLR_M_B1_:%[0-9]+]]:vr = PseudoVMCLR_M_B1 0, 0 /* e8 */
- ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B1_]]
+ ; CHECK: [[PseudoVMCLR_M_B64_:%[0-9]+]]:vr = PseudoVMCLR_M_B64 0, 0 /* e8 */
+ ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B64_]]
; CHECK-NEXT: PseudoRET implicit $v0
%0:gprb(s32) = G_CONSTANT i32 0
%1:vrb(<vscale x 1 x s1>) = G_VMCLR_VL %0(s32)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/render-vlop-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/render-vlop-rv64.mir
index c2c0ed72be7b7c..de78ceb2f5e13c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/render-vlop-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/render-vlop-rv64.mir
@@ -11,8 +11,8 @@ body: |
bb.1:
; CHECK-LABEL: name: negative_vl
; CHECK: [[ADDI:%[0-9]+]]:gprnox0 = ADDI $x0, -2
- ; CHECK-NEXT: [[PseudoVMCLR_M_B1_:%[0-9]+]]:vr = PseudoVMCLR_M_B1 [[ADDI]], 0 /* e8 */
- ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B1_]]
+ ; CHECK-NEXT: [[PseudoVMCLR_M_B64_:%[0-9]+]]:vr = PseudoVMCLR_M_B64 [[ADDI]], 0 /* e8 */
+ ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B64_]]
; CHECK-NEXT: PseudoRET implicit $v0
%0:gprb(s64) = G_CONSTANT i64 -2
%1:vrb(<vscale x 1 x s1>) = G_VMCLR_VL %0(s64)
@@ -31,8 +31,8 @@ body: |
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
- ; CHECK-NEXT: [[PseudoVMCLR_M_B1_:%[0-9]+]]:vr = PseudoVMCLR_M_B1 [[COPY]], 0 /* e8 */
- ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B1_]]
+ ; CHECK-NEXT: [[PseudoVMCLR_M_B64_:%[0-9]+]]:vr = PseudoVMCLR_M_B64 [[COPY]], 0 /* e8 */
+ ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B64_]]
; CHECK-NEXT: PseudoRET implicit $v0
%0:gprb(s64) = COPY $x10
%1:vrb(<vscale x 1 x s1>) = G_VMCLR_VL %0(s64)
@@ -48,8 +48,8 @@ tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: nonzero_vl
- ; CHECK: [[PseudoVMCLR_M_B1_:%[0-9]+]]:vr = PseudoVMCLR_M_B1 1, 0 /* e8 */
- ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B1_]]
+ ; CHECK: [[PseudoVMCLR_M_B64_:%[0-9]+]]:vr = PseudoVMCLR_M_B64 1, 0 /* e8 */
+ ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B64_]]
; CHECK-NEXT: PseudoRET implicit $v0
%0:gprb(s64) = G_CONSTANT i64 1
%1:vrb(<vscale x 1 x s1>) = G_VMCLR_VL %0(s64)
@@ -65,8 +65,8 @@ tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: zero_vl
- ; CHECK: [[PseudoVMCLR_M_B1_:%[0-9]+]]:vr = PseudoVMCLR_M_B1 0, 0 /* e8 */
- ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B1_]]
+ ; CHECK: [[PseudoVMCLR_M_B64_:%[0-9]+]]:vr = PseudoVMCLR_M_B64 0, 0 /* e8 */
+ ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B64_]]
; CHECK-NEXT: PseudoRET implicit $v0
%0:gprb(s64) = G_CONSTANT i64 0
%1:vrb(<vscale x 1 x s1>) = G_VMCLR_VL %0(s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vmclr-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vmclr-rv32.mir
index 1ef1312cc17c0e..ab91b3d80bd9bc 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vmclr-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vmclr-rv32.mir
@@ -10,8 +10,8 @@ tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv1i1
- ; CHECK: [[PseudoVMCLR_M_B1_:%[0-9]+]]:vr = PseudoVMCLR_M_B1 -1, 0 /* e8 */
- ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B1_]]
+ ; CHECK: [[PseudoVMCLR_M_B64_:%[0-9]+]]:vr = PseudoVMCLR_M_B64 -1, 0 /* e8 */
+ ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B64_]]
; CHECK-NEXT: PseudoRET implicit $v0
%0:gprb(s32) = G_CONSTANT i32 -1
%1:vrb(<vscale x 1 x s1>) = G_VMCLR_VL %0(s32)
@@ -27,8 +27,8 @@ tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv2i1
- ; CHECK: [[PseudoVMCLR_M_B2_:%[0-9]+]]:vr = PseudoVMCLR_M_B2 -1, 0 /* e8 */
- ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B2_]]
+ ; CHECK: [[PseudoVMCLR_M_B32_:%[0-9]+]]:vr = PseudoVMCLR_M_B32 -1, 0 /* e8 */
+ ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B32_]]
; CHECK-NEXT: PseudoRET implicit $v0
%0:gprb(s32) = G_CONSTANT i32 -1
%1:vrb(<vscale x 2 x s1>) = G_VMCLR_VL %0(s32)
@@ -44,8 +44,8 @@ tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv4i1
- ; CHECK: [[PseudoVMCLR_M_B4_:%[0-9]+]]:vr = PseudoVMCLR_M_B4 -1, 0 /* e8 */
- ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B4_]]
+ ; CHECK: [[PseudoVMCLR_M_B16_:%[0-9]+]]:vr = PseudoVMCLR_M_B16 -1, 0 /* e8 */
+ ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B16_]]
; CHECK-NEXT: PseudoRET implicit $v0
%0:gprb(s32) = G_CONSTANT i32 -1
%1:vrb(<vscale x 4 x s1>) = G_VMCLR_VL %0(s32)
@@ -78,8 +78,8 @@ tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv16i1
- ; CHECK: [[PseudoVMCLR_M_B16_:%[0-9]+]]:vr = PseudoVMCLR_M_B16 -1, 0 /* e8 */
- ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B16_]]
+ ; CHECK: [[PseudoVMCLR_M_B4_:%[0-9]+]]:vr = PseudoVMCLR_M_B4 -1, 0 /* e8 */
+ ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B4_]]
; CHECK-NEXT: PseudoRET implicit $v0
%0:gprb(s32) = G_CONSTANT i32 -1
%1:vrb(<vscale x 16 x s1>) = G_VMCLR_VL %0(s32)
@@ -95,8 +95,8 @@ tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv32i1
- ; CHECK: [[PseudoVMCLR_M_B32_:%[0-9]+]]:vr = PseudoVMCLR_M_B32 -1, 0 /* e8 */
- ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B32_]]
+ ; CHECK: [[PseudoVMCLR_M_B2_:%[0-9]+]]:vr = PseudoVMCLR_M_B2 -1, 0 /* e8 */
+ ; CHECK-NEXT: $v0 = COPY [[PseudoVMCLR_M_B2_]]
; CHECK-NEXT: PseudoRET implicit $v0
%0:gprb(s32) = ...
[truncated]
Replace LMUL suffixes with _B1, _B2, etc. This matches what we do for other mask only instructions like VCPOP_M, VFIRST_M, VMSBF_M, VLM, VSM, etc. Now all pseudoinstructions that use Log2SEW=0 will be consistently named.
Force-pushed from df0998d to f403efd.
LGTM.
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/190/builds/11284

Here is the relevant piece of the build log for reference.
Replace LMUL suffixes with _B1, _B2, etc. This matches what we do
for other mask only instructions like VCPOP_M, VFIRST_M, VMSBF_M,
VLM, VSM, etc.
Now all pseudoinstructions that use Log2SEW=0 will be consistently
named.
Stacked on #119785
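For readers unfamiliar with the vbool&lt;n&gt;_t naming, here is a minimal standalone sketch of the suffix mapping, assuming SEW=8 as the MTypeInfo comment in the diff does. The LMul enum and maskSuffixFor helper below are illustrative only, not LLVM's actual types.

#include <cstdio>

// Illustrative only: the new _B<n> suffix comes from the mask type
// vbool<n>_t, where n = SEW / LMUL with SEW assumed to be 8.
enum class LMul { MF8, MF4, MF2, M1, M2, M4, M8 };

static const char *maskSuffixFor(LMul L) {
  switch (L) {
  case LMul::MF8: return "B64"; // 8 / (1/8) = 64
  case LMul::MF4: return "B32"; // 8 / (1/4) = 32
  case LMul::MF2: return "B16"; // 8 / (1/2) = 16
  case LMul::M1:  return "B8";  // 8 / 1 = 8
  case LMul::M2:  return "B4";  // 8 / 2 = 4
  case LMul::M4:  return "B2";  // 8 / 4 = 2
  case LMul::M8:  return "B1";  // 8 / 8 = 1
  }
  return "";
}

int main() {
  // Under the new scheme the old PseudoVMAND_MM_MF8 is spelled
  // PseudoVMAND_MM_B64, matching PseudoVMSET_M_B64 and the other
  // mask-only pseudos.
  std::printf("PseudoVMAND_MM_%s\n", maskSuffixFor(LMul::MF8));
  return 0;
}

Because n = SEW/LMUL, the smallest fractional LMUL (MF8) produces the largest ratio (B64), which is why each case table in the patch reads in the reverse order of the old LMUL-based suffixes.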