Commit a1c892b

[X86][SSE] lowerVECTOR_SHUFFLE - canonicalize with horizontal ops.
Before lowering shuffles, see if we can merge horizontal ops or canonicalize the shuffle mask to point to the same LHS/RHS of the HOps when an HOp's args are repeated.
1 parent dcf659e commit a1c892b
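As a hedged illustration of the pattern being targeted (the IR below is a made-up example, not from this commit; the function name and mask were chosen for illustration): a horizontal op whose two arguments are the same value repeats its results across both halves, so a shuffle of that result can have its mask canonicalized to reference matching lanes of a single input. Here, once the mask is folded to the low half, the shuffle is redundant:

; Hypothetical example: phaddd(%a, %a) produces [a0+a1, a2+a3, a0+a1, a2+a3],
; so the <2,3,0,1> shuffle below selects the same data as the identity mask
; and can be canonicalized away, leaving a single phaddd.
declare <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32>, <4 x i32>)

define <4 x i32> @repeated_hop(<4 x i32> %a) {
  %h = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a, <4 x i32> %a)
  %s = shufflevector <4 x i32> %h, <4 x i32> %h, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
  ret <4 x i32> %s
}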

File tree

2 files changed: +66 -59 lines changed

llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 21 additions & 2 deletions

@@ -18690,7 +18690,13 @@ static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
   return false;
 }
 
-/// Top-level lowering for x86 vector shuffles.
+// Forward declaration.
+static SDValue canonicalizeShuffleMaskWithHorizOp(
+    MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
+    unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
+    const X86Subtarget &Subtarget);
+
+/// Top-level lowering for x86 vector shuffles.
 ///
 /// This handles decomposition, canonicalization, and lowering of all x86
 /// vector shuffles. Most of the specific lowering strategies are encapsulated
@@ -18799,8 +18805,21 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
     }
   }
 
+  SmallVector<SDValue> Ops = {V1, V2};
+  SmallVector<int> Mask(OrigMask.begin(), OrigMask.end());
+
+  // Canonicalize the shuffle with any horizontal ops inputs.
+  // NOTE: This may update Ops and Mask.
+  if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
+          Ops, Mask, VT.getSizeInBits(), DL, DAG, Subtarget))
+    return DAG.getBitcast(VT, HOp);
+
+  V1 = DAG.getBitcast(VT, Ops[0]);
+  V2 = DAG.getBitcast(VT, Ops[1]);
+  assert(Mask.size() == NumElements && "canonicalizeShuffleMaskWithHorizOp "
+                                       "shouldn't alter the shuffle mask size");
+
   // Commute the shuffle if it will improve canonicalization.
-  SmallVector<int, 64> Mask(OrigMask.begin(), OrigMask.end());
   if (canonicalizeShuffleMaskWithCommute(Mask)) {
     ShuffleVectorSDNode::commuteMask(Mask);
     std::swap(V1, V2);
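To make the repeated-args case concrete, here is a minimal standalone sketch, not the LLVM routine itself (the function name and the simplifying assumption that every referenced shuffle operand is a horizontal op with identical arguments are mine): for such an HOp, lane i + NumElts/2 of the result duplicates lane i, so any mask reference into a high half can be redirected to the matching low-half lane.

#include <vector>

// Hypothetical sketch: fold shuffle-mask references to the duplicated high
// half of a repeated-args horizontal op down to the equivalent low-half lane.
// Mask elements in [0, NumElts) index operand 0, [NumElts, 2*NumElts) index
// operand 1, and negative values mean "undef".
void canonicalizeRepeatedHOpMask(std::vector<int> &Mask, int NumElts) {
  int HalfElts = NumElts / 2;
  for (int &M : Mask) {
    if (M < 0)
      continue; // keep undef/sentinel lanes unchanged
    if ((M % NumElts) >= HalfElts)
      M -= HalfElts; // redirect to the matching low-half lane
  }
}

After this rewrite all lanes reference the low halves of the inputs, which makes it easier for later lowering to recognize the shuffle as redundant or to fold it into the horizontal op itself, as the test changes below show.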

llvm/test/CodeGen/X86/horizontal-sum.ll

Lines changed: 45 additions & 57 deletions

@@ -105,8 +105,8 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ;
 ; SSSE3-FAST-LABEL: pair_sum_v4i32_v4i32:
 ; SSSE3-FAST:       # %bb.0:
-; SSSE3-FAST-NEXT:    phaddd %xmm3, %xmm2
 ; SSSE3-FAST-NEXT:    phaddd %xmm1, %xmm0
+; SSSE3-FAST-NEXT:    phaddd %xmm3, %xmm2
 ; SSSE3-FAST-NEXT:    phaddd %xmm2, %xmm0
 ; SSSE3-FAST-NEXT:    retq
 ;
@@ -126,12 +126,12 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX1-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
 ; AVX1-SLOW-NEXT:    retq
 ;
-; AVX-FAST-LABEL: pair_sum_v4i32_v4i32:
-; AVX-FAST:       # %bb.0:
-; AVX-FAST-NEXT:    vphaddd %xmm3, %xmm2, %xmm2
-; AVX-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX-FAST-NEXT:    vphaddd %xmm2, %xmm0, %xmm0
-; AVX-FAST-NEXT:    retq
+; AVX1-FAST-LABEL: pair_sum_v4i32_v4i32:
+; AVX1-FAST:       # %bb.0:
+; AVX1-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT:    vphaddd %xmm3, %xmm2, %xmm1
+; AVX1-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT:    retq
 ;
 ; AVX2-SLOW-LABEL: pair_sum_v4i32_v4i32:
 ; AVX2-SLOW:       # %bb.0:
@@ -147,6 +147,13 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX2-SLOW-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
 ; AVX2-SLOW-NEXT:    retq
+;
+; AVX2-FAST-LABEL: pair_sum_v4i32_v4i32:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vphaddd %xmm3, %xmm2, %xmm2
+; AVX2-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vphaddd %xmm2, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    retq
   %5 = shufflevector <4 x i32> %0, <4 x i32> poison, <2 x i32> <i32 0, i32 2>
   %6 = shufflevector <4 x i32> %0, <4 x i32> poison, <2 x i32> <i32 1, i32 3>
   %7 = add <2 x i32> %5, %6
@@ -451,20 +458,20 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX2-FAST:       # %bb.0:
 ; AVX2-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
 ; AVX2-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vphaddd %xmm4, %xmm4, %xmm1
-; AVX2-FAST-NEXT:    vphaddd %xmm5, %xmm5, %xmm4
+; AVX2-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm1
+; AVX2-FAST-NEXT:    vphaddd %xmm4, %xmm4, %xmm4
+; AVX2-FAST-NEXT:    vphaddd %xmm5, %xmm5, %xmm5
 ; AVX2-FAST-NEXT:    vphaddd %xmm3, %xmm2, %xmm2
-; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm1[0,3]
-; AVX2-FAST-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
-; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
-; AVX2-FAST-NEXT:    vpaddd %xmm1, %xmm3, %xmm1
-; AVX2-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vphaddd %xmm7, %xmm6, %xmm1
-; AVX2-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm2
-; AVX2-FAST-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm4[0,3]
+; AVX2-FAST-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm5[0]
+; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[1,3],xmm4[1,3]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm5[3]
+; AVX2-FAST-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
+; AVX2-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vphaddd %xmm7, %xmm6, %xmm2
+; AVX2-FAST-NEXT:    vphaddd %xmm1, %xmm2, %xmm1
 ; AVX2-FAST-NEXT:    vpbroadcastq %xmm1, %ymm1
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    retq
@@ -993,8 +1000,8 @@ define <4 x float> @reduction_sum_v4f32_v4f32_reassoc(<4 x float> %0, <4 x float
 ; SSSE3-FAST-NEXT:    movaps %xmm3, %xmm1
 ; SSSE3-FAST-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
 ; SSSE3-FAST-NEXT:    addps %xmm3, %xmm1
-; SSSE3-FAST-NEXT:    haddps %xmm0, %xmm1
-; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,2],xmm1[2,0]
+; SSSE3-FAST-NEXT:    haddps %xmm1, %xmm0
+; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,2],xmm0[0,2]
 ; SSSE3-FAST-NEXT:    movaps %xmm4, %xmm0
 ; SSSE3-FAST-NEXT:    retq
 ;
@@ -1028,8 +1035,8 @@ define <4 x float> @reduction_sum_v4f32_v4f32_reassoc(<4 x float> %0, <4 x float
 ; AVX-FAST-NEXT:    vaddps %xmm1, %xmm2, %xmm1
 ; AVX-FAST-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm3[1,0]
 ; AVX-FAST-NEXT:    vaddps %xmm2, %xmm3, %xmm2
-; AVX-FAST-NEXT:    vhaddps %xmm1, %xmm2, %xmm1
-; AVX-FAST-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,0]
+; AVX-FAST-NEXT:    vhaddps %xmm2, %xmm1, %xmm1
+; AVX-FAST-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX-FAST-NEXT:    retq
   %5 = call reassoc float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %0)
   %6 = call reassoc float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %1)
@@ -1105,39 +1112,20 @@ define <4 x i32> @reduction_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32
 ; AVX-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX-SLOW-NEXT:    retq
 ;
-; AVX1-FAST-LABEL: reduction_sum_v4i32_v4i32:
-; AVX1-FAST:       # %bb.0:
-; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; AVX1-FAST-NEXT:    vpaddd %xmm4, %xmm0, %xmm0
-; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; AVX1-FAST-NEXT:    vpaddd %xmm4, %xmm1, %xmm1
-; AVX1-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
-; AVX1-FAST-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
-; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
-; AVX1-FAST-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
-; AVX1-FAST-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
-; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,2]
-; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-FAST-NEXT:    retq
-;
-; AVX2-FAST-LABEL: reduction_sum_v4i32_v4i32:
-; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; AVX2-FAST-NEXT:    vpaddd %xmm4, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; AVX2-FAST-NEXT:    vpaddd %xmm4, %xmm1, %xmm1
-; AVX2-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
-; AVX2-FAST-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
-; AVX2-FAST-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
-; AVX2-FAST-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,2]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2-FAST-NEXT:    retq
+; AVX-FAST-LABEL: reduction_sum_v4i32_v4i32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; AVX-FAST-NEXT:    vpaddd %xmm4, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; AVX-FAST-NEXT:    vpaddd %xmm4, %xmm1, %xmm1
+; AVX-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
+; AVX-FAST-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
+; AVX-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
+; AVX-FAST-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
+; AVX-FAST-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
+; AVX-FAST-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX-FAST-NEXT:    retq
   %5 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %0)
   %6 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %1)
   %7 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %2)
