
Commit dbb65dd

[LLVM][tests/CodeGen/RISCV] Convert instances of ConstantExpr based splats to use splat().
This is mostly NFC, but some output does change because the splat() form consistently inserts into poison rather than undef and uses i64 as the index type for the inserts.
1 parent d6ff986 commit dbb65dd
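
For readers unfamiliar with the two spellings, the splat constant shorthand is syntactic sugar for the insertelement/shufflevector ConstantExpr pattern being replaced throughout this diff. A minimal sketch follows; the @add_three function is a hypothetical example, not taken from these tests, and assumes an LLVM version that accepts the splat constant syntax:

; ConstantExpr-based splat of the i32 constant 3 (the old spelling):
;   shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 3, i64 0),
;                  <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; Equivalent splat constant (the new spelling used below):
;   splat (i32 3)
define <vscale x 4 x i32> @add_three(<vscale x 4 x i32> %v) {
  %r = add <vscale x 4 x i32> %v, splat (i32 3)
  ret <vscale x 4 x i32> %r
}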

34 files changed (+269 −269 lines)

llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll (+8 −8)

@@ -606,7 +606,7 @@ define <vscale x 16 x i1> @ctpop_nxv16i32_ult_two(<vscale x 16 x i32> %va) {
 ; CHECK-ZVBB-NEXT: vmsleu.vi v0, v8, 1
 ; CHECK-ZVBB-NEXT: ret
   %a = call <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32> %va)
-  %cmp = icmp ult <vscale x 16 x i32> %a, shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 2, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
+  %cmp = icmp ult <vscale x 16 x i32> %a, splat (i32 2)
   ret <vscale x 16 x i1> %cmp
 }

@@ -626,7 +626,7 @@ define <vscale x 16 x i1> @ctpop_nxv16i32_ugt_one(<vscale x 16 x i32> %va) {
 ; CHECK-ZVBB-NEXT: vmsgtu.vi v0, v8, 1
 ; CHECK-ZVBB-NEXT: ret
   %a = call <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32> %va)
-  %cmp = icmp ugt <vscale x 16 x i32> %a, shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
+  %cmp = icmp ugt <vscale x 16 x i32> %a, splat (i32 1)
   ret <vscale x 16 x i1> %cmp
 }

@@ -646,7 +646,7 @@ define <vscale x 16 x i1> @ctpop_nxv16i32_eq_one(<vscale x 16 x i32> %va) {
 ; CHECK-ZVBB-NEXT: vmseq.vi v0, v8, 1
 ; CHECK-ZVBB-NEXT: ret
   %a = call <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32> %va)
-  %cmp = icmp eq <vscale x 16 x i32> %a, shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
+  %cmp = icmp eq <vscale x 16 x i32> %a, splat (i32 1)
   ret <vscale x 16 x i1> %cmp
 }

@@ -666,7 +666,7 @@ define <vscale x 16 x i1> @ctpop_nxv16i32_ne_one(<vscale x 16 x i32> %va) {
 ; CHECK-ZVBB-NEXT: vmsne.vi v0, v8, 1
 ; CHECK-ZVBB-NEXT: ret
   %a = call <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32> %va)
-  %cmp = icmp ne <vscale x 16 x i32> %a, shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
+  %cmp = icmp ne <vscale x 16 x i32> %a, splat (i32 1)
   ret <vscale x 16 x i1> %cmp
 }

@@ -1020,7 +1020,7 @@ define <vscale x 8 x i1> @ctpop_nxv8i64_ult_two(<vscale x 8 x i64> %va) {
 ; CHECK-ZVBB-NEXT: vmsleu.vi v0, v8, 1
 ; CHECK-ZVBB-NEXT: ret
   %a = call <vscale x 8 x i64> @llvm.ctpop.nxv8i64(<vscale x 8 x i64> %va)
-  %cmp = icmp ult <vscale x 8 x i64> %a, shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 2, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
+  %cmp = icmp ult <vscale x 8 x i64> %a, splat (i64 2)
   ret <vscale x 8 x i1> %cmp
 }

@@ -1040,7 +1040,7 @@ define <vscale x 8 x i1> @ctpop_nxv8i64_ugt_one(<vscale x 8 x i64> %va) {
 ; CHECK-ZVBB-NEXT: vmsgtu.vi v0, v8, 1
 ; CHECK-ZVBB-NEXT: ret
   %a = call <vscale x 8 x i64> @llvm.ctpop.nxv8i64(<vscale x 8 x i64> %va)
-  %cmp = icmp ugt <vscale x 8 x i64> %a, shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
+  %cmp = icmp ugt <vscale x 8 x i64> %a, splat (i64 1)
   ret <vscale x 8 x i1> %cmp
 }

@@ -1060,7 +1060,7 @@ define <vscale x 8 x i1> @ctpop_nxv8i64_eq_one(<vscale x 8 x i64> %va) {
 ; CHECK-ZVBB-NEXT: vmseq.vi v0, v8, 1
 ; CHECK-ZVBB-NEXT: ret
   %a = call <vscale x 8 x i64> @llvm.ctpop.nxv8i64(<vscale x 8 x i64> %va)
-  %cmp = icmp eq <vscale x 8 x i64> %a, shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
+  %cmp = icmp eq <vscale x 8 x i64> %a, splat (i64 1)
   ret <vscale x 8 x i1> %cmp
 }

@@ -1080,7 +1080,7 @@ define <vscale x 8 x i1> @ctpop_nxv8i64_ne_one(<vscale x 8 x i64> %va) {
 ; CHECK-ZVBB-NEXT: vmsne.vi v0, v8, 1
 ; CHECK-ZVBB-NEXT: ret
   %a = call <vscale x 8 x i64> @llvm.ctpop.nxv8i64(<vscale x 8 x i64> %va)
-  %cmp = icmp ne <vscale x 8 x i64> %a, shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
+  %cmp = icmp ne <vscale x 8 x i64> %a, splat (i64 1)
   ret <vscale x 8 x i1> %cmp
 }

llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll (+3 −3)

@@ -58,7 +58,7 @@ define void @strided_store_zero_start(i64 %n, ptr %p) {
 ; RV64-NEXT: ret
   %step = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
   %gep = getelementptr inbounds %struct, ptr %p, <vscale x 1 x i64> %step, i32 6
-  tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i32 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer))
+  tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> splat (i1 true))
   ret void
 }

@@ -93,7 +93,7 @@ define void @strided_store_offset_start(i64 %n, ptr %p) {
   %.splat = shufflevector <vscale x 1 x i64> %.splatinsert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
   %add = add <vscale x 1 x i64> %step, %.splat
   %gep = getelementptr inbounds %struct, ptr %p, <vscale x 1 x i64> %add, i32 6
-  tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i32 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer))
+  tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> splat (i1 true))
   ret void
 }

@@ -118,7 +118,7 @@ define void @stride_one_store(i64 %n, ptr %p) {
 ; RV64-NEXT: ret
   %step = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
   %gep = getelementptr inbounds i64, ptr %p, <vscale x 1 x i64> %step
-  tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i32 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer))
+  tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> splat (i1 true))
   ret void
 }

llvm/test/CodeGen/RISCV/rvv/pr61561.ll (+5 −5)

@@ -23,11 +23,11 @@ define <vscale x 4 x i8> @foo(ptr %p) {
 ; CHECK-NEXT: ret
   %i13 = load <vscale x 4 x i16>, ptr %p, align 2
   %i14 = zext <vscale x 4 x i16> %i13 to <vscale x 4 x i32>
-  %i15 = shl nuw nsw <vscale x 4 x i32> %i14, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 3, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-  %i16 = and <vscale x 4 x i32> %i15, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 248, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-  %i17 = mul nuw nsw <vscale x 4 x i32> %i16, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 3735, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-  %i18 = add nuw nsw <vscale x 4 x i32> %i17, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 16384, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-  %i21 = lshr <vscale x 4 x i32> %i18, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 15, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  %i15 = shl nuw nsw <vscale x 4 x i32> %i14, splat (i32 3)
+  %i16 = and <vscale x 4 x i32> %i15, splat (i32 248)
+  %i17 = mul nuw nsw <vscale x 4 x i32> %i16, splat (i32 3735)
+  %i18 = add nuw nsw <vscale x 4 x i32> %i17, splat (i32 16384)
+  %i21 = lshr <vscale x 4 x i32> %i18, splat (i32 15)
   %i22 = trunc <vscale x 4 x i32> %i21 to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %i22
 }

llvm/test/CodeGen/RISCV/rvv/pr63459.ll (+1 −1)

@@ -14,7 +14,7 @@ define void @snork(ptr %arg, <vscale x 2 x i64> %arg1) {
 ; CHECK-NEXT: ret
 bb:
   %getelementptr = getelementptr inbounds <vscale x 2 x i32>, ptr %arg, <vscale x 2 x i64> %arg1
-  tail call void @llvm.vp.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32> shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> poison, i32 1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x ptr> align 4 %getelementptr, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), i32 4)
+  tail call void @llvm.vp.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32> splat (i32 1), <vscale x 2 x ptr> align 4 %getelementptr, <vscale x 2 x i1> splat (i1 true), i32 4)
   ret void
 }

llvm/test/CodeGen/RISCV/rvv/stepvector.ll (+5 −5)

@@ -743,7 +743,7 @@ define <vscale x 2 x i64> @hi_bits_known_zero() vscale_range(2, 4) {
 ; CHECK-NEXT: vmv.v.i v8, 0
 ; CHECK-NEXT: ret
   %step = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-  %and = and <vscale x 2 x i64> %step, shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 u0xfffffffffffffff8, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+  %and = and <vscale x 2 x i64> %step, splat (i64 u0xfffffffffffffff8)
   ret <vscale x 2 x i64> %and
 }

@@ -758,8 +758,8 @@ define <vscale x 2 x i64> @hi_bits_known_zero_overflow() vscale_range(2, 4) {
 ; CHECK-NEXT: vand.vi v8, v8, -8
 ; CHECK-NEXT: ret
   %step = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-  %step.mul = mul <vscale x 2 x i64> %step, shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 u0xffffffffffffffff, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
-  %and = and <vscale x 2 x i64> %step.mul, shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 u0xfffffffffffffff8, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+  %step.mul = mul <vscale x 2 x i64> %step, splat (i64 u0xffffffffffffffff)
+  %and = and <vscale x 2 x i64> %step.mul, splat (i64 u0xfffffffffffffff8)
   ret <vscale x 2 x i64> %and
 }

@@ -771,7 +771,7 @@ define <vscale x 2 x i64> @lo_bits_known_zero() {
 ; CHECK-NEXT: vmv.v.i v8, 0
 ; CHECK-NEXT: ret
   %step = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-  %step.mul = mul <vscale x 2 x i64> %step, shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 8, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
-  %and = and <vscale x 2 x i64> %step.mul, shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 7, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+  %step.mul = mul <vscale x 2 x i64> %step, splat (i64 8)
+  %and = and <vscale x 2 x i64> %step.mul, splat (i64 7)
   ret <vscale x 2 x i64> %and
 }
