; Check that the prefer-no-gather / prefer-no-scatter target features can
; disable generation of gather / scatter instructions.
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2,+fast-gather %s -o - | FileCheck %s --check-prefixes=GATHER
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2,+fast-gather,+prefer-no-gather %s -o - | FileCheck %s --check-prefixes=NO-GATHER
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512vl,+avx512dq < %s | FileCheck %s --check-prefix=SCATTER
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512vl,+avx512dq,+prefer-no-gather < %s | FileCheck %s --check-prefix=SCATTER-NO-GATHER
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512vl,+avx512dq,+prefer-no-scatter < %s | FileCheck %s --check-prefix=GATHER-NO-SCATTER
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512vl,+avx512dq,+prefer-no-gather,+prefer-no-scatter < %s | FileCheck %s --check-prefix=NO-SCATTER-GATHER

@A = global [1024 x i8] zeroinitializer, align 128
@B = global [1024 x i64] zeroinitializer, align 128
@C = global [1024 x i64] zeroinitializer, align 128
; Check that prefer-no-gather disables gather lowering in LowerMGATHER
; (a vectorized loop with main and epilogue vector bodies, both using
; llvm.masked.gather).
define void @test() #0 {
; GATHER-LABEL: test:
; GATHER: vpgatherdq
;
; NO-GATHER-LABEL: test:
; NO-GATHER-NOT: vpgatherdq
;
; GATHER-NO-SCATTER-LABEL: test:
; GATHER-NO-SCATTER: vpgatherdq
;
; NO-SCATTER-GATHER-LABEL: test:
; NO-SCATTER-GATHER-NOT: vpgatherdq
iter.check:
  br i1 false, label %vec.epilog.scalar.ph, label %vector.main.loop.iter.check

vector.main.loop.iter.check:                      ; preds = %iter.check
  br i1 false, label %vec.epilog.ph, label %vector.ph

vector.ph:                                        ; preds = %vector.main.loop.iter.check
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %0 = add i64 %index, 0
  %1 = getelementptr inbounds [1024 x i8], ptr @A, i64 0, i64 %0
  %2 = getelementptr inbounds i8, ptr %1, i32 0
  %wide.load = load <32 x i8>, ptr %2, align 1
  %3 = sext <32 x i8> %wide.load to <32 x i64>
  %4 = getelementptr inbounds [1024 x i64], ptr @B, i64 0, <32 x i64> %3
  %wide.masked.gather = call <32 x i64> @llvm.masked.gather.v32i64.v32p0(<32 x ptr> %4, i32 8, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i64> poison)
  %5 = getelementptr inbounds [1024 x i64], ptr @C, i64 0, i64 %0
  %6 = getelementptr inbounds i64, ptr %5, i32 0
  store <32 x i64> %wide.masked.gather, ptr %6, align 8
  %index.next = add nuw i64 %index, 32
  %7 = icmp eq i64 %index.next, 1024
  br i1 %7, label %middle.block, label %vector.body, !llvm.loop !0

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 1024, 1024
  br i1 %cmp.n, label %for.cond.cleanup, label %vec.epilog.iter.check

vec.epilog.iter.check:                            ; preds = %middle.block
  br i1 true, label %vec.epilog.scalar.ph, label %vec.epilog.ph

vec.epilog.ph:                                    ; preds = %vector.main.loop.iter.check, %vec.epilog.iter.check
  %vec.epilog.resume.val = phi i64 [ 1024, %vec.epilog.iter.check ], [ 0, %vector.main.loop.iter.check ]
  br label %vec.epilog.vector.body

vec.epilog.vector.body:                           ; preds = %vec.epilog.vector.body, %vec.epilog.ph
  %index2 = phi i64 [ %vec.epilog.resume.val, %vec.epilog.ph ], [ %index.next5, %vec.epilog.vector.body ]
  %8 = add i64 %index2, 0
  %9 = getelementptr inbounds [1024 x i8], ptr @A, i64 0, i64 %8
  %10 = getelementptr inbounds i8, ptr %9, i32 0
  %wide.load3 = load <16 x i8>, ptr %10, align 1
  %11 = sext <16 x i8> %wide.load3 to <16 x i64>
  %12 = getelementptr inbounds [1024 x i64], ptr @B, i64 0, <16 x i64> %11
  %wide.masked.gather4 = call <16 x i64> @llvm.masked.gather.v16i64.v16p0(<16 x ptr> %12, i32 8, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i64> poison)
  %13 = getelementptr inbounds [1024 x i64], ptr @C, i64 0, i64 %8
  %14 = getelementptr inbounds i64, ptr %13, i32 0
  store <16 x i64> %wide.masked.gather4, ptr %14, align 8
  %index.next5 = add nuw i64 %index2, 16
  %15 = icmp eq i64 %index.next5, 1024
  br i1 %15, label %vec.epilog.middle.block, label %vec.epilog.vector.body, !llvm.loop !2

vec.epilog.middle.block:                          ; preds = %vec.epilog.vector.body
  %cmp.n1 = icmp eq i64 1024, 1024
  br i1 %cmp.n1, label %for.cond.cleanup, label %vec.epilog.scalar.ph

vec.epilog.scalar.ph:                             ; preds = %iter.check, %vec.epilog.iter.check, %vec.epilog.middle.block
  %bc.resume.val = phi i64 [ 1024, %vec.epilog.middle.block ], [ 1024, %vec.epilog.iter.check ], [ 0, %iter.check ]
  br label %for.body

for.body:                                         ; preds = %for.body, %vec.epilog.scalar.ph
  %iv = phi i64 [ %bc.resume.val, %vec.epilog.scalar.ph ], [ %iv.next, %for.body ]
  %inA = getelementptr inbounds [1024 x i8], ptr @A, i64 0, i64 %iv
  %valA = load i8, ptr %inA, align 1
  %valA.ext = sext i8 %valA to i64
  %inB = getelementptr inbounds [1024 x i64], ptr @B, i64 0, i64 %valA.ext
  %valB = load i64, ptr %inB, align 8
  %out = getelementptr inbounds [1024 x i64], ptr @C, i64 0, i64 %iv
  store i64 %valB, ptr %out, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %cmp = icmp ult i64 %iv.next, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !4

for.cond.cleanup:                                 ; preds = %vec.epilog.middle.block, %middle.block, %for.body
  ret void
}

declare <32 x i64> @llvm.masked.gather.v32i64.v32p0(<32 x ptr>, i32 immarg, <32 x i1>, <32 x i64>) #1

declare <16 x i64> @llvm.masked.gather.v16i64.v16p0(<16 x ptr>, i32 immarg, <16 x i1>, <16 x i64>) #1

; Loop metadata marking the loops above as already vectorized.
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.isvectorized", i32 1}
!2 = distinct !{!2, !1, !3}
!3 = !{!"llvm.loop.unroll.runtime.disable"}
!4 = distinct !{!4, !3, !1}
| 111 | + |
; Check that prefer-no-gather disables gather lowering in
; ScalarizeMaskedMemIntrin as well.
define <4 x float> @gather_v4f32_ptr_v4i32(<4 x ptr> %ptr, <4 x i32> %trigger, <4 x float> %passthru) {
; GATHER-LABEL: gather_v4f32_ptr_v4i32:
; GATHER: vgatherqps
;
; NO-GATHER-LABEL: gather_v4f32_ptr_v4i32:
; NO-GATHER-NOT: vgatherqps
;
; GATHER-NO-SCATTER-LABEL: gather_v4f32_ptr_v4i32:
; GATHER-NO-SCATTER: vgatherqps
;
; NO-SCATTER-GATHER-LABEL: gather_v4f32_ptr_v4i32:
; NO-SCATTER-GATHER-NOT: vgatherqps
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  %res = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptr, i32 4, <4 x i1> %mask, <4 x float> %passthru)
  ret <4 x float> %res
}

declare <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x float>)
| 131 | + |
%struct.a = type { [4 x i32], [4 x i8], %struct.b, i32 }
%struct.b = type { i32, i32 }
@c = external dso_local global %struct.a, align 4

; Check that prefer-no-gather disables gather lowering in
; ScalarizeMaskedMemIntrin (constant-pointer variant).
define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) {
; GATHER-LABEL: gather_v8i32_v8i32:
; GATHER: vpgatherdd
;
; NO-GATHER-LABEL: gather_v8i32_v8i32:
; NO-GATHER-NOT: vpgatherdd
;
; GATHER-NO-SCATTER-LABEL: gather_v8i32_v8i32:
; GATHER-NO-SCATTER: vpgatherdd
;
; NO-SCATTER-GATHER-LABEL: gather_v8i32_v8i32:
; NO-SCATTER-GATHER-NOT: vpgatherdd
  %1 = icmp eq <8 x i32> %trigger, zeroinitializer
  %2 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> getelementptr (%struct.a, <8 x ptr> <ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c>, <8 x i64> zeroinitializer, i32 0, <8 x i64> <i64 3, i64 3, i64 3, i64 3, i64 3, i64 3, i64 3, i64 3>), i32 4, <8 x i1> %1, <8 x i32> undef)
  %3 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> getelementptr (%struct.a, <8 x ptr> <ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c>, <8 x i64> zeroinitializer, i32 3), i32 4, <8 x i1> %1, <8 x i32> undef)
  %4 = add <8 x i32> %2, %3
  %5 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> getelementptr (%struct.a, <8 x ptr> <ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c>, <8 x i64> zeroinitializer, i32 3), i32 4, <8 x i1> %1, <8 x i32> undef)
  %6 = add <8 x i32> %4, %5
  ret <8 x i32> %6
}

declare <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i32>)
| 156 | + |
; Check that prefer-no-scatter disables scatter lowering (splat base
; pointer with per-lane i32 indices).
define void @scatter_test1(ptr %base, <16 x i32> %ind, i16 %mask, <16 x i32> %val) {
; SCATTER-LABEL: scatter_test1:
; SCATTER: vpscatterdd
;
; SCATTER-NO-GATHER-LABEL: scatter_test1:
; SCATTER-NO-GATHER: vpscatterdd
;
; GATHER-NO-SCATTER-LABEL: scatter_test1:
; GATHER-NO-SCATTER-NOT: vpscatterdd
;
; NO-SCATTER-GATHER-LABEL: scatter_test1:
; NO-SCATTER-GATHER-NOT: vpscatterdd
  %broadcast.splatinsert = insertelement <16 x ptr> undef, ptr %base, i32 0
  %broadcast.splat = shufflevector <16 x ptr> %broadcast.splatinsert, <16 x ptr> undef, <16 x i32> zeroinitializer

  %gep.random = getelementptr i32, <16 x ptr> %broadcast.splat, <16 x i32> %ind
  %imask = bitcast i16 %mask to <16 x i1>
  call void @llvm.masked.scatter.v16i32.v16p0(<16 x i32> %val, <16 x ptr> %gep.random, i32 4, <16 x i1> %imask)
  call void @llvm.masked.scatter.v16i32.v16p0(<16 x i32> %val, <16 x ptr> %gep.random, i32 4, <16 x i1> %imask)
  ret void
}

declare void @llvm.masked.scatter.v8i32.v8p0(<8 x i32>, <8 x ptr>, i32, <8 x i1>)
declare void @llvm.masked.scatter.v16i32.v16p0(<16 x i32>, <16 x ptr>, i32, <16 x i1>)
| 182 | + |
; Check that prefer-no-scatter disables scatter lowering when a gather
; and a scatter share the same pointer vector.
define <8 x i32> @scatter_test2(<8 x i32> %a1, <8 x ptr> %ptr) {
; SCATTER-LABEL: scatter_test2:
; SCATTER: vpscatterqd
;
; SCATTER-NO-GATHER-LABEL: scatter_test2:
; SCATTER-NO-GATHER: vpscatterqd
;
; GATHER-NO-SCATTER-LABEL: scatter_test2:
; GATHER-NO-SCATTER-NOT: vpscatterqd
;
; NO-SCATTER-GATHER-LABEL: scatter_test2:
; NO-SCATTER-GATHER-NOT: vpscatterqd
  %a = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptr, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)

  call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %a1, <8 x ptr> %ptr, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
  ret <8 x i32> %a
}