Commit bb0078e

[X86][SSE] Fold SIGN_EXTEND(SIGN_EXTEND_VECTOR_INREG(X)) -> SIGN_EXTEND_VECTOR_INREG(X)
It should be possible to make this generic, but we're not great at checking legality of *_EXTEND_VECTOR_INREG ops, so I'm conservatively putting this inside X86ISelLowering.cpp.
Parent: 7903ae4
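
The identity behind the fold is simply that sign extension composes: widening i16 -> i32 -> i64 yields the same value as widening i16 -> i64 directly, so the outer SIGN_EXTEND of a SIGN_EXTEND_VECTOR_INREG can collapse into a single SIGN_EXTEND_VECTOR_INREG at the wider type. A minimal standalone C++ check of the scalar analog (illustration only, not part of the commit):

// Scalar analog of SIGN_EXTEND(SIGN_EXTEND_VECTOR_INREG(X)): two sign
// extensions in a row equal one sign extension to the final width.
// Exhaustively checks every i16 value.
#include <cassert>
#include <cstdint>

int main() {
  for (int32_t i = INT16_MIN; i <= INT16_MAX; ++i) {
    int16_t x = static_cast<int16_t>(i);
    int64_t TwoStep = static_cast<int64_t>(static_cast<int32_t>(x)); // i16->i32->i64
    int64_t OneStep = static_cast<int64_t>(x);                       // i16->i64
    assert(TwoStep == OneStep);
  }
  return 0;
}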

2 files changed: +11, -11 lines

llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 5 additions & 1 deletion

@@ -46769,10 +46769,14 @@ static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
   if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
     return V;

-  if (VT.isVector())
+  if (VT.isVector()) {
     if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
       return R;

+    if (N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG)
+      return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0));
+  }
+
   if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
     return NewAdd;

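The commit message notes this fold could live in the generic combiner if legality of *_EXTEND_VECTOR_INREG ops were easier to check. A rough sketch of what that generic form might look like (hypothetical, not part of this commit; assumes DAGCombiner's usual DAG, TLI, and LegalOperations members, and would sit in DAGCombiner::visitSIGN_EXTEND):

// Hypothetical generic version (sketch only; not what the commit adds).
// N is the SIGN_EXTEND node being visited.
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG &&
    (!LegalOperations ||
     TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND_VECTOR_INREG, VT)))
  return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, SDLoc(N), VT,
                     N0.getOperand(0));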
llvm/test/CodeGen/X86/masked_load.ll

Lines changed: 6 additions & 10 deletions

@@ -462,9 +462,8 @@ define <8 x double> @load_v8f64_v8i16(<8 x i16> %trigger, <8 x double>* %addr, <
 ; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT:    vpmovsxwq %xmm3, %xmm5
-; AVX1-NEXT:    vpmovsxwd %xmm3, %xmm3
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX1-NEXT:    vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,1,1]
+; AVX1-NEXT:    vpmovsxwq %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovsxwq %xmm0, %xmm4
@@ -482,8 +481,7 @@ define <8 x double> @load_v8f64_v8i16(<8 x i16> %trigger, <8 x double>* %addr, <
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
 ; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX2-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
-; AVX2-NEXT:    vpmovsxwd %xmm3, %xmm3
-; AVX2-NEXT:    vpmovsxdq %xmm3, %ymm3
+; AVX2-NEXT:    vpmovsxwq %xmm3, %ymm3
 ; AVX2-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovsxwq %xmm0, %ymm0
 ; AVX2-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm4
@@ -1782,9 +1780,8 @@ define <8 x i64> @load_v8i64_v8i16(<8 x i16> %trigger, <8 x i64>* %addr, <8 x i6
 ; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT:    vpmovsxwq %xmm3, %xmm5
-; AVX1-NEXT:    vpmovsxwd %xmm3, %xmm3
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX1-NEXT:    vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,1,1]
+; AVX1-NEXT:    vpmovsxwq %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovsxwq %xmm0, %xmm4
@@ -1802,8 +1799,7 @@ define <8 x i64> @load_v8i64_v8i16(<8 x i16> %trigger, <8 x i64>* %addr, <8 x i6
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
 ; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX2-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
-; AVX2-NEXT:    vpmovsxwd %xmm3, %xmm3
-; AVX2-NEXT:    vpmovsxdq %xmm3, %ymm3
+; AVX2-NEXT:    vpmovsxwq %xmm3, %ymm3
 ; AVX2-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovsxwq %xmm0, %ymm0
 ; AVX2-NEXT:    vpmaskmovq (%rdi), %ymm0, %ymm4
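
The AVX2 diffs above are easy to sanity-check outside the compiler: the old two-instruction chain (vpmovsxwd then vpmovsxdq) and the new single vpmovsxwq compute the same four i64 lanes. A small standalone check via intrinsics (illustration only, not from the commit; compile with -mavx2):

// vpmovsxwd + vpmovsxdq vs. a single vpmovsxwq: both sign-extend the low
// four i16 lanes of an XMM register to four i64 lanes in a YMM register.
#include <immintrin.h>
#include <cstdio>
#include <cstring>

int main() {
  __m128i X = _mm_setr_epi16(-1, 2, -3, 4, -5, 6, -7, 8);

  // Old codegen: i16 -> i32 (vpmovsxwd), then i32 -> i64 (vpmovsxdq).
  __m256i TwoStep = _mm256_cvtepi32_epi64(_mm_cvtepi16_epi32(X));

  // New codegen: i16 -> i64 in one step (vpmovsxwq).
  __m256i OneStep = _mm256_cvtepi16_epi64(X);

  puts(memcmp(&TwoStep, &OneStep, sizeof(__m256i)) == 0 ? "identical"
                                                        : "mismatch");
  return 0;
}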
