llvm/test/CodeGen/RISCV/rvv/commutable.ll
173 lines changed: 173 additions & 0 deletions
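This hunk adds ten test functions to commutable.ll: an unmasked and a masked variant for each of the saturating and fixed-point RVV intrinsics vsadd, vsaddu, vaadd, vaaddu, and vsmul. Each test calls the same intrinsic twice with the two vector operands swapped and adds the results, so the CHECK lines (typically produced with update_llc_test_checks.py) record whether the backend currently treats the operation as commutable. The RUN lines sit above this hunk and are not shown; for rvv tests written against iXLen they usually look something like the following (an assumed, representative invocation, not taken from this diff):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s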
@@ -649,3 +649,176 @@ entry:
  ret <vscale x 1 x i64> %ret
}

+; vsadd.vv
+declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
+define <vscale x 1 x i64> @commutable_vsadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: commutable_vsadd_vv:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v10, v8, v9
+; CHECK-NEXT: vsadd.vv v8, v9, v8
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vsadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
+; CHECK-LABEL: commutable_vsadd_vv_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vsadd.vv v8, v9, v8, v0.t
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
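In the vsadd tests above, both the unmasked and the masked expected output still contain two vsadd.vv instructions with swapped operand order (v8, v9 versus v9, v8), so the commuted calls are not folded into a single instruction at this point.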
+
+; vsaddu.vv
+declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
+define <vscale x 1 x i64> @commutable_vsaddu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: commutable_vsaddu_vv:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v10, v8, v9
+; CHECK-NEXT: vsaddu.vv v8, v9, v8
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vsaddu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
+; CHECK-LABEL: commutable_vsaddu_vv_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vsaddu.vv v8, v9, v8, v0.t
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
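The unsigned form mirrors the signed one: the expected output for both vsaddu tests likewise keeps two vsaddu.vv instructions with swapped operands.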
+
+; vaadd.vv
+declare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vaadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: commutable_vaadd_vv:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vaadd.vv v8, v8, v9
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vaadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
+; CHECK-LABEL: commutable_vaadd_vv_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vaadd.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vaadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
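The averaging add behaves differently in these checks: the unmasked test expects a single vaadd.vv whose result is doubled by vadd.vv v8, v8, v8, so the two commuted calls collapse into one instruction, while the masked test still expects two vaadd.vv instructions, although the second has been commuted to the same v8, v9 operand order. The csrwi vxrm, 0 lines reflect the extra rounding-mode operand (iXLen 0) that these fixed-point intrinsics carry.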
+
+; vaaddu.vv
+declare <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vaaddu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: commutable_vaaddu_vv:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vaaddu.vv v8, v8, v9
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vaaddu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
+; CHECK-LABEL: commutable_vaaddu_vv_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vaaddu.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vaaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
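vaaddu is handled the same way as vaadd here: one vaaddu.vv in the unmasked output, and two vaaddu.vv instructions with matching operand order in the masked output.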
+
+; vsmul.vv
+declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vsmul_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: commutable_vsmul_vv:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vsmul.vv v10, v8, v9
+; CHECK-NEXT: vsmul.vv v8, v9, v8
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
+define <vscale x 1 x i64> @commutable_vsmul_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
+; CHECK-LABEL: commutable_vsmul_vv_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vsmul.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vsmul.vv v8, v9, v8, v0.t
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
+  %b = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
+  %ret = add <vscale x 1 x i64> %a, %b
+  ret <vscale x 1 x i64> %ret
+}
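vsmul also takes a rounding-mode operand, but its expected output follows the vsadd pattern rather than the vaadd one: both the unmasked and the masked tests still contain two vsmul.vv instructions with swapped operands.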