
Commit 2c1c887

[RISCV] Make fixed-point instructions commutable (#90035)
This PR includes:
* vsadd.vv/vsaddu.vv
* vaadd.vv/vaaddu.vv
* vsmul.vv
1 parent c705c68 commit 2c1c887

3 files changed: +194, -13 lines


llvm/lib/Target/RISCV/RISCVInstrInfo.cpp

Lines changed: 5 additions & 0 deletions
@@ -3132,6 +3132,11 @@ bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
   case CASE_RVV_OPCODE_WIDEN(VWMACC_VV):
   case CASE_RVV_OPCODE_WIDEN(VWMACCU_VV):
   case CASE_RVV_OPCODE_UNMASK(VADC_VVM):
+  case CASE_RVV_OPCODE(VSADD_VV):
+  case CASE_RVV_OPCODE(VSADDU_VV):
+  case CASE_RVV_OPCODE(VAADD_VV):
+  case CASE_RVV_OPCODE(VAADDU_VV):
+  case CASE_RVV_OPCODE(VSMUL_VV):
     // Operands 2 and 3 are commutable.
     return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
   case CASE_VFMA_SPLATS(FMADD):
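For readers unfamiliar with this hook: findCommutedOpIndices is how generic machine passes ask a target which source operands of an instruction may be swapped. For these RVV pseudos, operand 0 is the destination and operand 1 is the merge/passthru operand, which is why the commutable pair is operands 2 and 3. The C++ below is an illustrative sketch only, not part of this commit; it uses the existing TargetInstrInfo entry points, and the helper name trySwapSources is invented for the example.

// Illustrative sketch (not from this patch): how a pass would drive the hook.
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
using namespace llvm;

// Hypothetical helper: swap MI's two commutable source operands in place.
static bool trySwapSources(MachineInstr &MI, const TargetInstrInfo &TII) {
  unsigned Idx1 = TargetInstrInfo::CommuteAnyOperandIndex;
  unsigned Idx2 = TargetInstrInfo::CommuteAnyOperandIndex;
  // For the pseudos added above, this fills Idx1/Idx2 with 2 and 3.
  if (!TII.findCommutedOpIndices(MI, Idx1, Idx2))
    return false;
  // NewMI=false rewrites MI itself rather than building a new instruction.
  return TII.commuteInstruction(MI, /*NewMI=*/false, Idx1, Idx2) != nullptr;
}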

llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Lines changed: 16 additions & 13 deletions
@@ -2146,8 +2146,9 @@ multiclass VPseudoBinaryRoundingMode<VReg RetClass,
                                      string Constraint = "",
                                      int sew = 0,
                                      int UsesVXRM = 1,
-                                     int TargetConstraintType = 1> {
-  let VLMul = MInfo.value, SEW=sew in {
+                                     int TargetConstraintType = 1,
+                                     bit Commutable = 0> {
+  let VLMul = MInfo.value, SEW=sew, isCommutable = Commutable in {
     defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
     def suffix : VPseudoBinaryNoMaskRoundingMode<RetClass, Op1Class, Op2Class,
                                                  Constraint, UsesVXRM,
@@ -2232,8 +2233,9 @@ multiclass VPseudoBinaryV_VV<LMULInfo m, string Constraint = "", int sew = 0, bi
   defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint, sew, Commutable=Commutable>;
 }
 
-multiclass VPseudoBinaryV_VV_RM<LMULInfo m, string Constraint = ""> {
-  defm _VV : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
+multiclass VPseudoBinaryV_VV_RM<LMULInfo m, string Constraint = "", bit Commutable = 0> {
+  defm _VV : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, m.vrclass, m, Constraint,
+                                       Commutable=Commutable>;
 }
 
 // Similar to VPseudoBinaryV_VV, but uses MxListF.
@@ -2715,10 +2717,11 @@ multiclass VPseudoVGTR_VV_VX_VI<Operand ImmType = simm5, string Constraint = "">
   }
 }
 
-multiclass VPseudoVSALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
+multiclass VPseudoVSALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = "",
+                                 bit Commutable = 0> {
   foreach m = MxList in {
     defvar mx = m.MX;
-    defm "" : VPseudoBinaryV_VV<m, Constraint>,
+    defm "" : VPseudoBinaryV_VV<m, Constraint, Commutable=Commutable>,
               SchedBinary<"WriteVSALUV", "ReadVSALUV", "ReadVSALUX", mx,
                           forceMergeOpRead=true>;
     defm "" : VPseudoBinaryV_VX<m, Constraint>,
@@ -2788,7 +2791,7 @@ multiclass VPseudoVSALU_VV_VX {
 multiclass VPseudoVSMUL_VV_VX_RM {
   foreach m = MxList in {
     defvar mx = m.MX;
-    defm "" : VPseudoBinaryV_VV_RM<m>,
+    defm "" : VPseudoBinaryV_VV_RM<m, Commutable=1>,
               SchedBinary<"WriteVSMulV", "ReadVSMulV", "ReadVSMulV", mx,
                           forceMergeOpRead=true>;
     defm "" : VPseudoBinaryV_VX_RM<m>,
@@ -2797,10 +2800,10 @@ multiclass VPseudoVSMUL_VV_VX_RM {
   }
 }
 
-multiclass VPseudoVAALU_VV_VX_RM {
+multiclass VPseudoVAALU_VV_VX_RM<bit Commutable = 0> {
   foreach m = MxList in {
     defvar mx = m.MX;
-    defm "" : VPseudoBinaryV_VV_RM<m>,
+    defm "" : VPseudoBinaryV_VV_RM<m, Commutable=Commutable>,
               SchedBinary<"WriteVAALUV", "ReadVAALUV", "ReadVAALUV", mx,
                           forceMergeOpRead=true>;
     defm "" : VPseudoBinaryV_VX_RM<m>,
@@ -6448,17 +6451,17 @@ defm PseudoVMV_V : VPseudoUnaryVMV_V_X_I;
 // 12.1. Vector Single-Width Saturating Add and Subtract
 //===----------------------------------------------------------------------===//
 let Defs = [VXSAT], hasSideEffects = 1 in {
-  defm PseudoVSADDU : VPseudoVSALU_VV_VX_VI;
-  defm PseudoVSADD : VPseudoVSALU_VV_VX_VI;
+  defm PseudoVSADDU : VPseudoVSALU_VV_VX_VI<Commutable=1>;
+  defm PseudoVSADD : VPseudoVSALU_VV_VX_VI<Commutable=1>;
   defm PseudoVSSUBU : VPseudoVSALU_VV_VX;
   defm PseudoVSSUB : VPseudoVSALU_VV_VX;
 }
 
 //===----------------------------------------------------------------------===//
 // 12.2. Vector Single-Width Averaging Add and Subtract
 //===----------------------------------------------------------------------===//
-defm PseudoVAADDU : VPseudoVAALU_VV_VX_RM;
-defm PseudoVAADD : VPseudoVAALU_VV_VX_RM;
+defm PseudoVAADDU : VPseudoVAALU_VV_VX_RM<Commutable=1>;
+defm PseudoVAADD : VPseudoVAALU_VV_VX_RM<Commutable=1>;
 defm PseudoVASUBU : VPseudoVAALU_VV_VX_RM;
 defm PseudoVASUB : VPseudoVAALU_VV_VX_RM;

llvm/test/CodeGen/RISCV/rvv/commutable.ll

Lines changed: 173 additions & 0 deletions
@@ -649,3 +649,176 @@ entry:
   ret <vscale x 1 x i64> %ret
 }
 
+; vsadd.vv
+declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
+define <vscale x 1 x i64> @commutable_vsadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: commutable_vsadd_vv:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vsadd.vv v10, v8, v9
+; CHECK-NEXT:    vsadd.vv v8, v9, v8
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vsadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
+; CHECK-LABEL: commutable_vsadd_vv_masked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vsadd.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vsadd.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v8
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
+
+; vsaddu.vv
+declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
+define <vscale x 1 x i64> @commutable_vsaddu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: commutable_vsaddu_vv:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vsaddu.vv v10, v8, v9
+; CHECK-NEXT:    vsaddu.vv v8, v9, v8
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vsaddu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
+; CHECK-LABEL: commutable_vsaddu_vv_masked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vsaddu.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vsaddu.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v8
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
+
+; vaadd.vv
+declare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vaadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: commutable_vaadd_vv:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vaadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
+; CHECK-LABEL: commutable_vaadd_vv_masked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vaadd.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vaadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v8
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
+
+; vaaddu.vv
+declare <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vaaddu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: commutable_vaaddu_vv:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vaaddu.vv v8, v8, v9
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vaaddu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
+; CHECK-LABEL: commutable_vaaddu_vv_masked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vaaddu.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vaaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v8
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
+
+; vsmul.vv
+declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vsmul_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: commutable_vsmul_vv:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsmul.vv v10, v8, v9
+; CHECK-NEXT:    vsmul.vv v8, v9, v8
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vsmul_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
+; CHECK-LABEL: commutable_vsmul_vv_masked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsmul.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vsmul.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v8
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
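A note on what the CHECK lines show: each test computes the intrinsic twice with the vector operands swapped and adds the results, so the output reveals whether the backend now treats the two calls as equivalent. In the unmasked vaadd/vaaddu cases they collapse into a single instruction; vsadd/vsaddu/vsmul and the masked cases still emit both, which for the saturating adds is consistent with their Defs = [VXSAT], hasSideEffects = 1 definition shown above.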
