Skip to content

Commit 900bea9

Browse files
[LLVM][test] Convert remaining instances of ConstantExpr-based splats to use splat().
This is mostly NFC, but some output does change due to consistently inserting into poison rather than undef, and due to using i64 as the index type for inserts.
1 parent 6a17929 commit 900bea9

File tree

5 files changed

+132
-132
lines changed

5 files changed

+132
-132
lines changed

Diff for: llvm/test/Analysis/CostModel/AArch64/ext-rhadd.ll

+14-14
Original file line numberDiff line numberDiff line change
@@ -37,9 +37,9 @@ define void @srhadd_i8_sext_i16_scalable(ptr %a, ptr %b, ptr %dst) {
3737
%ld2 = load <vscale x 16 x i8>, ptr %b
3838
%ext1 = sext <vscale x 16 x i8> %ld1 to <vscale x 16 x i16>
3939
%ext2 = sext <vscale x 16 x i8> %ld2 to <vscale x 16 x i16>
40-
%add1 = add nuw nsw <vscale x 16 x i16> %ext1, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i64 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
40+
%add1 = add nuw nsw <vscale x 16 x i16> %ext1, splat (i16 1)
4141
%add2 = add nuw nsw <vscale x 16 x i16> %add1, %ext2
42-
%shr = lshr <vscale x 16 x i16> %add2, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i64 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
42+
%shr = lshr <vscale x 16 x i16> %add2, splat (i16 1)
4343
%trunc = trunc <vscale x 16 x i16> %shr to <vscale x 16 x i8>
4444
store <vscale x 16 x i8> %trunc, ptr %a
4545
ret void
@@ -58,9 +58,9 @@ define void @srhadd_i16_sext_i64_scalable(ptr %a, ptr %b, ptr %dst) {
5858
%ld2 = load <vscale x 8 x i16>, ptr %b
5959
%ext1 = sext <vscale x 8 x i16> %ld1 to <vscale x 8 x i64>
6060
%ext2 = sext <vscale x 8 x i16> %ld2 to <vscale x 8 x i64>
61-
%add1 = add nuw nsw <vscale x 8 x i64> %ext1, shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
61+
%add1 = add nuw nsw <vscale x 8 x i64> %ext1, splat (i64 1)
6262
%add2 = add nuw nsw <vscale x 8 x i64> %add1, %ext2
63-
%shr = lshr <vscale x 8 x i64> %add2, shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
63+
%shr = lshr <vscale x 8 x i64> %add2, splat (i64 1)
6464
%trunc = trunc <vscale x 8 x i64> %shr to <vscale x 8 x i16>
6565
store <vscale x 8 x i16> %trunc, ptr %a
6666
ret void
@@ -102,9 +102,9 @@ define void @urhadd_i8_zext_i64(ptr %a, ptr %b, ptr %dst) {
102102
%ld2 = load <vscale x 16 x i8>, ptr %b
103103
%ext1 = zext <vscale x 16 x i8> %ld1 to <vscale x 16 x i64>
104104
%ext2 = zext <vscale x 16 x i8> %ld2 to <vscale x 16 x i64>
105-
%add1 = add nuw nsw <vscale x 16 x i64> %ext1, shufflevector (<vscale x 16 x i64> insertelement (<vscale x 16 x i64> poison, i64 1, i64 0), <vscale x 16 x i64> poison, <vscale x 16 x i32> zeroinitializer)
105+
%add1 = add nuw nsw <vscale x 16 x i64> %ext1, splat (i64 1)
106106
%add2 = add nuw nsw <vscale x 16 x i64> %add1, %ext2
107-
%shr = lshr <vscale x 16 x i64> %add2, shufflevector (<vscale x 16 x i64> insertelement (<vscale x 16 x i64> poison, i64 1, i64 0), <vscale x 16 x i64> poison, <vscale x 16 x i32> zeroinitializer)
107+
%shr = lshr <vscale x 16 x i64> %add2, splat (i64 1)
108108
%trunc = trunc <vscale x 16 x i64> %shr to <vscale x 16 x i8>
109109
store <vscale x 16 x i8> %trunc, ptr %a
110110
ret void
@@ -123,9 +123,9 @@ define void @urhadd_i16_zext_i32(ptr %a, ptr %b, ptr %dst) {
123123
%ld2 = load <vscale x 8 x i16>, ptr %b
124124
%ext1 = zext <vscale x 8 x i16> %ld1 to <vscale x 8 x i32>
125125
%ext2 = zext <vscale x 8 x i16> %ld2 to <vscale x 8 x i32>
126-
%add1 = add nuw nsw <vscale x 8 x i32> %ext1, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
126+
%add1 = add nuw nsw <vscale x 8 x i32> %ext1, splat (i32 1)
127127
%add2 = add nuw nsw <vscale x 8 x i32> %add1, %ext2
128-
%shr = lshr <vscale x 8 x i32> %add2, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
128+
%shr = lshr <vscale x 8 x i32> %add2, splat (i32 1)
129129
%trunc = trunc <vscale x 8 x i32> %shr to <vscale x 8 x i16>
130130
store <vscale x 8 x i16> %trunc, ptr %a
131131
ret void
@@ -146,9 +146,9 @@ define void @ext_operand_mismatch(ptr %a, ptr %b, ptr %dst) {
146146
%ld2 = load <vscale x 16 x i8>, ptr %b
147147
%ext1 = sext <vscale x 16 x i8> %ld1 to <vscale x 16 x i16>
148148
%ext2 = zext <vscale x 16 x i8> %ld2 to <vscale x 16 x i16>
149-
%add1 = add nuw nsw <vscale x 16 x i16> %ext1, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i64 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
149+
%add1 = add nuw nsw <vscale x 16 x i16> %ext1, splat (i16 1)
150150
%add2 = add nuw nsw <vscale x 16 x i16> %add1, %ext2
151-
%shr = lshr <vscale x 16 x i16> %add2, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i64 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
151+
%shr = lshr <vscale x 16 x i16> %add2, splat (i16 1)
152152
%trunc = trunc <vscale x 16 x i16> %shr to <vscale x 16 x i8>
153153
store <vscale x 16 x i8> %trunc, ptr %a
154154
ret void
@@ -167,9 +167,9 @@ define void @add_multiple_uses(ptr %a, ptr %b, ptr %dst) {
167167
%ld2 = load <vscale x 8 x i16>, ptr %b
168168
%ext1 = sext <vscale x 8 x i16> %ld1 to <vscale x 8 x i32>
169169
%ext2 = sext <vscale x 8 x i16> %ld2 to <vscale x 8 x i32>
170-
%add1 = add nuw nsw <vscale x 8 x i32> %ext1, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
170+
%add1 = add nuw nsw <vscale x 8 x i32> %ext1, splat (i32 1)
171171
%add2 = add nuw nsw <vscale x 8 x i32> %add1, %ext2
172-
%shr = lshr <vscale x 8 x i32> %add2, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
172+
%shr = lshr <vscale x 8 x i32> %add2, splat (i32 1)
173173
%trunc = trunc <vscale x 8 x i32> %shr to <vscale x 8 x i16>
174174
%add.res = add nuw nsw <vscale x 8 x i32> %add1, %add2
175175
%res = trunc <vscale x 8 x i32> %add.res to <vscale x 8 x i16>
@@ -190,9 +190,9 @@ define void @shift_multiple_uses(ptr %a, ptr %b, ptr %dst) {
190190
%ld2 = load <vscale x 16 x i8>, ptr %b
191191
%ext1 = zext <vscale x 16 x i8> %ld1 to <vscale x 16 x i16>
192192
%ext2 = zext <vscale x 16 x i8> %ld2 to <vscale x 16 x i16>
193-
%add1 = add nuw nsw <vscale x 16 x i16> %ext1, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i64 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
193+
%add1 = add nuw nsw <vscale x 16 x i16> %ext1, splat (i16 1)
194194
%add2 = add nuw nsw <vscale x 16 x i16> %add1, %ext2
195-
%shr = lshr <vscale x 16 x i16> %add2, shufflevector (<vscale x 16 x i16> insertelement (<vscale x 16 x i16> poison, i16 1, i64 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer)
195+
%shr = lshr <vscale x 16 x i16> %add2, splat (i16 1)
196196
%trunc = trunc <vscale x 16 x i16> %shr to <vscale x 16 x i8>
197197
%add3 = add nuw nsw <vscale x 16 x i16> %shr, %add2
198198
%res = trunc <vscale x 16 x i16> %add3 to <vscale x 16 x i8>

Diff for: llvm/test/CodeGen/Generic/expand-vp-load-store.ll

+4-4
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,7 @@ define <vscale x 1 x i64> @vpload_nxv1i64_allones_mask(ptr %ptr, i32 zeroext %ev
127127
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 1 x i64> @llvm.masked.load.nxv1i64.p0(ptr [[PTR:%.*]], i32 1, <vscale x 1 x i1> [[TMP2]], <vscale x 1 x i64> poison)
128128
; CHECK-NEXT: ret <vscale x 1 x i64> [[TMP3]]
129129
;
130-
%load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i32 %evl)
130+
%load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
131131
ret <vscale x 1 x i64> %load
132132
}
133133

@@ -140,7 +140,7 @@ define <vscale x 1 x i64> @vpload_nxv1i64_allones_mask_vscale(ptr %ptr) {
140140
;
141141
%vscale = call i32 @llvm.vscale.i32()
142142
%vlmax = mul nuw i32 %vscale, 1
143-
%load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i32 %vlmax)
143+
%load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %vlmax)
144144
ret <vscale x 1 x i64> %load
145145
}
146146

@@ -179,7 +179,7 @@ define void @vpstore_nxv1i64_allones_mask(<vscale x 1 x i64> %val, ptr %ptr, i32
179179
; CHECK-NEXT: call void @llvm.masked.store.nxv1i64.p0(<vscale x 1 x i64> [[VAL:%.*]], ptr [[PTR:%.*]], i32 1, <vscale x 1 x i1> [[TMP2]])
180180
; CHECK-NEXT: ret void
181181
;
182-
call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i32 %evl)
182+
call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
183183
ret void
184184
}
185185

@@ -192,7 +192,7 @@ define void @vpstore_nxv1i64_allones_mask_vscale(<vscale x 1 x i64> %val, ptr %p
192192
;
193193
%vscale = call i32 @llvm.vscale.i32()
194194
%vlmax = mul nuw i32 %vscale, 1
195-
call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i32 %vlmax)
195+
call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %vlmax)
196196
ret void
197197
}
198198

Diff for: llvm/test/Transforms/AggressiveInstCombine/vector-or-load.ll

+2-2
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ define <vscale x 8 x i16> @or-load-scalable-vector(ptr %p1) {
3535
; CHECK-NEXT: [[L2:%.*]] = load <vscale x 8 x i8>, ptr [[P2]], align 1
3636
; CHECK-NEXT: [[E1:%.*]] = zext <vscale x 8 x i8> [[L1]] to <vscale x 8 x i16>
3737
; CHECK-NEXT: [[E2:%.*]] = zext <vscale x 8 x i8> [[L2]] to <vscale x 8 x i16>
38-
; CHECK-NEXT: [[S2:%.*]] = shl <vscale x 8 x i16> [[E2]], shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 8, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
38+
; CHECK-NEXT: [[S2:%.*]] = shl <vscale x 8 x i16> [[E2]], shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 8, i64 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
3939
; CHECK-NEXT: [[OR:%.*]] = or <vscale x 8 x i16> [[E1]], [[S2]]
4040
; CHECK-NEXT: ret <vscale x 8 x i16> [[OR]]
4141
;
@@ -44,7 +44,7 @@ define <vscale x 8 x i16> @or-load-scalable-vector(ptr %p1) {
4444
%l2 = load <vscale x 8 x i8>, ptr %p2, align 1
4545
%e1 = zext <vscale x 8 x i8> %l1 to <vscale x 8 x i16>
4646
%e2 = zext <vscale x 8 x i8> %l2 to <vscale x 8 x i16>
47-
%s2 = shl <vscale x 8 x i16> %e2, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 8, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
47+
%s2 = shl <vscale x 8 x i16> %e2, splat (i16 8)
4848
%or = or <vscale x 8 x i16> %e1, %s2
4949
ret <vscale x 8 x i16> %or
5050
}

Diff for: llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll

+2-2
Original file line numberDiff line numberDiff line change
@@ -71,15 +71,15 @@ define void @callslotoptzn(<vscale x 4 x float> %val, ptr %out) {
7171
; CHECK-NEXT: [[ALLOC:%.*]] = alloca <vscale x 4 x float>, align 16
7272
; CHECK-NEXT: [[IDX:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
7373
; CHECK-NEXT: [[STRIDE:%.*]] = getelementptr inbounds float, ptr [[ALLOC]], <vscale x 4 x i32> [[IDX]]
74-
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[VAL:%.*]], <vscale x 4 x ptr> [[STRIDE]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i32 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
74+
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[VAL:%.*]], <vscale x 4 x ptr> [[STRIDE]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
7575
; CHECK-NEXT: [[LI:%.*]] = load <vscale x 4 x float>, ptr [[ALLOC]], align 4
7676
; CHECK-NEXT: store <vscale x 4 x float> [[LI]], ptr [[OUT:%.*]], align 4
7777
; CHECK-NEXT: ret void
7878
;
7979
%alloc = alloca <vscale x 4 x float>, align 16
8080
%idx = tail call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
8181
%stride = getelementptr inbounds float, ptr %alloc, <vscale x 4 x i32> %idx
82-
call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> %val, <vscale x 4 x ptr> %stride, i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i32 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
82+
call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> %val, <vscale x 4 x ptr> %stride, i32 4, <vscale x 4 x i1> splat (i1 true))
8383
%li = load <vscale x 4 x float>, ptr %alloc, align 4
8484
store <vscale x 4 x float> %li, ptr %out, align 4
8585
ret void

0 commit comments

Comments
 (0)