
Commit 5fc28eb

[mlir][Linalg] NFC - Add bbarg pretty printing to linalg::generic
Differential Revision: https://reviews.llvm.org/D135151
Parent: 36a2002
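The user-visible effect is purely cosmetic (hence NFC): the printer now gives `linalg.generic` region block arguments semantic name hints instead of the positional fallback. A minimal sketch of the change in printed IR (the `f32` types are illustrative; exact positional numbering depends on the enclosing function):

  // Before: region arguments used positional names.
  ^bb0(%arg1: f32, %arg2: f32):
  // After: region inputs print as %in and region outputs as %out.
  ^bb0(%in: f32, %out: f32):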

10 files changed (+377, -293 lines)

mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td

Lines changed: 3 additions & 1 deletion
@@ -58,7 +58,9 @@ class LinalgStructuredBase_Op<string mnemonic, list<Trait> props>
 // Generic Linalg ops.
 //===----------------------------------------------------------------------===//
 
-def GenericOp : LinalgStructuredBase_Op<"generic", [AttrSizedOperandSegments]> {
+def GenericOp : LinalgStructuredBase_Op<"generic", [
+    DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmBlockArgumentNames"]>,
+    AttrSizedOperandSegments]> {
   let description = [{
     Generic Linalg op form where the key properties of the computation are
     specified as attributes. In pretty form, a `linalg.generic` op is written
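The description above is cut off by the hunk boundary, but the "pretty form" it refers to is the parenthesized `ins`/`outs` syntax. A sketch of a pretty-form `linalg.generic` as it would print with the new block-argument names (operands `%t` and `%init`, the maps, and the shapes are illustrative, not taken from this commit):

  %0 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>,
                                        affine_map<(d0) -> (d0)>],
                       iterator_types = ["parallel"]}
      ins(%t : tensor<?xf32>) outs(%init : tensor<?xf32>) {
    ^bb0(%in: f32, %out: f32):
      %sq = arith.mulf %in, %in : f32
      linalg.yield %sq : f32
  } -> tensor<?xf32>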

mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp

Lines changed: 10 additions & 1 deletion
@@ -663,8 +663,17 @@ void FillOp::getCanonicalizationPatterns(RewritePatternSet &results,
 }
 
 //===----------------------------------------------------------------------===//
-// GenericOps
+// GenericOp
 //===----------------------------------------------------------------------===//
+
+void GenericOp::getAsmBlockArgumentNames(Region &region,
+                                         OpAsmSetValueNameFn setNameFn) {
+  for (Value v : getRegionInputArgs())
+    setNameFn(v, "in");
+  for (Value v : getRegionOutputArgs())
+    setNameFn(v, "out");
+}
+
 void GenericOp::build(
     OpBuilder &builder, OperationState &result, TypeRange resultTensorTypes,
     ValueRange inputs, ValueRange outputs, ArrayAttr indexingMaps,
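The two loops above implement the `getAsmBlockArgumentNames` hook declared via `OpAsmOpInterface` in the .td change: the asm printer consults it when assigning SSA names to the region's block arguments, uniquifying any clashing hints. Adapted from the `@linalg_red_add` test below (operand names `%in0t`/`%out0t` come from that test's signature, not this file), a reduction would now print roughly as:

  %red = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>,
                                          affine_map<(d0) -> (0)>],
                         iterator_types = ["reduction"]}
      ins(%in0t : tensor<?xf32>) outs(%out0t : tensor<1xf32>) {
    ^bb0(%in: f32, %out: f32):
      %sum = arith.addf %in, %out : f32
      linalg.yield %sum : f32
  } -> tensor<1xf32>

Since diagnostics embed these printed names, remarks like `Combiner Op: %1 = arith.addf %arg2, %arg3 : f32` no longer match verbatim, which is why the test updates below keep only the stable prefix of each expected remark.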

mlir/test/Analysis/test-match-reduction.mlir

Lines changed: 5 additions & 5 deletions
@@ -7,7 +7,7 @@
 func.func @linalg_red_add(%in0t : tensor<?xf32>, %out0t : tensor<1xf32>) {
   // expected-remark@below {{Reduction found in output #0!}}
   // expected-remark@below {{Reduced Value: <block argument> of type 'f32' at index: 0}}
-  // expected-remark@below {{Combiner Op: %1 = arith.addf %arg2, %arg3 : f32}}
+  // expected-remark@below {{Combiner Op: %1 = arith.addf }}
   %red = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>,
                                           affine_map<(d0) -> (0)>],
                          iterator_types = ["reduction"]}
@@ -27,8 +27,8 @@ func.func @affine_red_add(%in: memref<256x512xf32>, %out: memref<256xf32>) {
   %cst = arith.constant 0.000000e+00 : f32
   affine.for %i = 0 to 256 {
     // expected-remark@below {{Reduction found in output #0!}}
-    // expected-remark@below {{Reduced Value: %1 = affine.load %arg0[%arg2, %arg3] : memref<256x512xf32>}}
-    // expected-remark@below {{Combiner Op: %2 = arith.addf %arg4, %1 : f32}}
+    // expected-remark@below {{Reduced Value: %1 = affine.load }}
+    // expected-remark@below {{Combiner Op: %2 = arith.addf }}
     %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (f32) {
       %ld = affine.load %in[%i, %j] : memref<256x512xf32>
       %add = arith.addf %red_iter, %ld : f32
@@ -63,8 +63,8 @@ func.func @linalg_red_max(%in0t: tensor<4x4xf32>, %out0t: tensor<4xf32>) {
 // expected-remark@below {{Testing function}}
 func.func @linalg_fused_red_add(%in0t: tensor<4x4xf32>, %out0t: tensor<4xf32>) {
   // expected-remark@below {{Reduction found in output #0!}}
-  // expected-remark@below {{Reduced Value: %2 = arith.subf %1, %arg2 : f32}}
-  // expected-remark@below {{Combiner Op: %3 = arith.addf %2, %arg3 : f32}}
+  // expected-remark@below {{Reduced Value: %2 = arith.subf}}
+  // expected-remark@below {{Combiner Op: %3 = arith.addf}}
   %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                                           affine_map<(d0, d1) -> (d0)>],
                          iterator_types = ["parallel", "reduction"]}

mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir

Lines changed: 21 additions & 18 deletions
@@ -80,8 +80,8 @@ func.func @fully_connected(%arg0: tensor<5x3xf32>, %arg1: tensor<6x3xf32>, %arg2
 // CHECK: [[INITB:%.+]] = tensor.empty()
 // CHECK: [[MATMUL:%.+]] = linalg.matmul ins(%arg0, [[TRANSPOSE]] : tensor<5x3xf32>, tensor<3x6xf32>) outs([[FILL]] : tensor<5x6xf32>) -> tensor<5x6xf32>
 // CHECK: [[ADDED:%.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP2]]], iterator_types = ["parallel", "parallel"]} ins(%arg2, [[MATMUL]] : tensor<6xf32>, tensor<5x6xf32>) outs([[INITB]] : tensor<5x6xf32>) {
-// CHECK:   ^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
-// CHECK:   [[ADD:%.+]] = arith.addf %arg3, %arg4 : f32
+// CHECK:   ^bb0(%[[ARG3:[0-9a-zA-Z_]+]]: f32, %[[ARG4:[0-9a-zA-Z_]+]]: f32, %[[ARG5:[0-9a-zA-Z_]+]]: f32):
+// CHECK:   [[ADD:%.+]] = arith.addf %[[ARG3]], %[[ARG4]] : f32
 // CHECK:   linalg.yield [[ADD]] : f32
 
 %0 = "tosa.fully_connected"(%arg0, %arg1, %arg2) : (tensor<5x3xf32>, tensor<6x3xf32>, tensor<6xf32>) -> (tensor<5x6xf32>)
@@ -129,8 +129,8 @@ func.func @fully_connected_dyn(%arg0: tensor<?x3xf32>, %arg1: tensor<6x3xf32>, %
 // CHECK: %[[INITB:.+]] = tensor.empty(%[[DIM]])
 // CHECK: %[[MATMUL:.+]] = linalg.matmul ins(%arg0, %[[TRANSPOSE]] : tensor<?x3xf32>, tensor<3x6xf32>) outs(%[[FILL]] : tensor<?x6xf32>) -> tensor<?x6xf32>
 // CHECK: %[[ADDED:.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP2]]], iterator_types = ["parallel", "parallel"]} ins(%arg2, %[[MATMUL]] : tensor<6xf32>, tensor<?x6xf32>) outs(%[[INITB]] : tensor<?x6xf32>) {
-// CHECK:   ^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
-// CHECK:   %[[ADD:.+]] = arith.addf %arg3, %arg4 : f32
+// CHECK:   ^bb0(%[[ARG3:[0-9a-zA-Z_]+]]: f32, %[[ARG4:[0-9a-zA-Z_]+]]: f32, %[[ARG5:[0-9a-zA-Z_]+]]: f32):
+// CHECK:   %[[ADD:.+]] = arith.addf %[[ARG3]], %[[ARG4]] : f32
 // CHECK:   linalg.yield %[[ADD]] : f32
 
 %0 = "tosa.fully_connected"(%arg0, %arg1, %arg2) : (tensor<?x3xf32>, tensor<6x3xf32>, tensor<6xf32>) -> (tensor<?x6xf32>)
@@ -214,6 +214,7 @@ func.func @avg_pool(%arg0: tensor<1x6x34x62xf32>) -> (tensor<1x5x33x62xf32>) {
 // CHECK: [[POOL:%.+]] = linalg.pooling_nhwc_sum {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins([[PAD]], [[KERNEL]] : tensor<1x8x36x62xf32>, tensor<4x4xf32>) outs([[FILL]] : tensor<1x5x33x62xf32>)
 // CHECK: [[INIT:%.+]] = tensor.empty()
 // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins([[POOL]] : tensor<1x5x33x62xf32>) outs([[INIT]] : tensor<1x5x33x62xf32>)
+// CHECK:   ^bb0(%[[BBARG1:[a-zA-Z0-9_]+]]: f32,
 // CHECK:   [[ZERO:%.0]] = arith.constant 0
 // CHECK:   [[ONE:%.+]] = arith.constant 1
 // CHECK:   [[HEIGHT:%.+]] = arith.constant 4
@@ -255,7 +256,7 @@ func.func @avg_pool(%arg0: tensor<1x6x34x62xf32>) -> (tensor<1x5x33x62xf32>) {
 // CHECK:   [[C:%.+]] = arith.muli [[YSEL]], [[XSEL]]
 // CHECK:   [[CI:%.+]] = arith.index_cast [[C]]
 // CHECK:   [[CF:%.+]] = arith.sitofp [[CI]]
-// CHECK:   [[RESULT:%.+]] = arith.divf %arg1, [[CF]]
+// CHECK:   [[RESULT:%.+]] = arith.divf %[[BBARG1]], [[CF]]
 // CHECK:   linalg.yield [[RESULT]]
 %0 = "tosa.avg_pool2d"(%arg0) {pad = [1, 1, 1, 1], kernel = [4, 4], stride = [1, 1]} : (tensor<1x6x34x62xf32>) -> (tensor<1x5x33x62xf32>)
 return %0 : tensor<1x5x33x62xf32>
@@ -286,10 +287,11 @@ func.func @avg_pool_i8(%arg0 : tensor<1x128x128x2xi8>) -> () {
 
 // CHECK: linalg.pooling_nhwc_sum
 // CHECK: linalg.generic
+// CHECK:   ^bb0(%[[BBARG1:[a-zA-Z0-9_]+]]: i32,
 
 // CHECK: %[[INZP:.+]] = arith.constant -128
 // CHECK: %[[INZP_OFF:.+]] = arith.muli %{{.+}}, %[[INZP]]
-// CHECK: %[[OFFSETED:.+]] = arith.subi %arg1, %[[INZP_OFF]]
+// CHECK: %[[OFFSETED:.+]] = arith.subi %[[BBARG1]], %[[INZP_OFF]]
 // CHECK: %[[NUMERATOR:.+]] = arith.constant 1073741825
 // CHECK: %[[MULTIPLIER:.+]] = arith.divui %[[NUMERATOR]], %{{.+}}
 // CHECK: %[[SHIFT:.+]] = arith.constant 30
@@ -315,10 +317,11 @@ func.func @avg_pool_i16(%arg0 : tensor<1x128x128x2xi16>) -> () {
 
 // CHECK: linalg.pooling_nhwc_sum
 // CHECK: linalg.generic
+// CHECK:   ^bb0(%[[BBARG1:[a-zA-Z0-9_]+]]: i32,
 
 // CHECK: %[[INZP:.+]] = arith.constant -128
 // CHECK: %[[INZP_OFF:.+]] = arith.muli %{{.+}}, %[[INZP]]
-// CHECK: %[[OFFSETED:.+]] = arith.subi %arg1, %[[INZP_OFF]]
+// CHECK: %[[OFFSETED:.+]] = arith.subi %[[BBARG1]], %[[INZP_OFF]]
 // CHECK: %[[NUMERATOR:.+]] = arith.constant 1073741825
 // CHECK: %[[MULTIPLIER:.+]] = arith.divui %[[NUMERATOR]], %{{.+}}
 // CHECK: %[[SHIFT:.+]] = arith.constant 30
@@ -479,8 +482,8 @@ func.func @depthwise_conv(%arg0 : tensor<1x7x5x3xf32>, %arg1 : tensor<3x1x3x11xf
 // CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv_2d_nhwc_hwcm {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %arg1 : tensor<1x7x5x3xf32>, tensor<3x1x3x11xf32>) outs([[FILL]] : tensor<1x5x5x3x11xf32>)
 // CHECK: [[COLLAPSED:%.+]] = tensor.collapse_shape [[DEPTH]] {{\[}}[0], [1], [2], [3, 4]]
 // CHECK: [[BIAS:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, [[COLLAPSED]] : tensor<33xf32>, tensor<1x5x5x33xf32>) outs([[OUT]] : tensor<1x5x5x33xf32>) {
-// CHECK:   ^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
-// CHECK:   [[ADD:%.+]] = arith.addf %arg3, %arg4 : f32
+// CHECK:   ^bb0(%[[ARG3:[0-9a-zA-Z_]+]]: f32, %[[ARG4:[0-9a-zA-Z_]+]]: f32, %[[ARG5:[0-9a-zA-Z_]+]]: f32):
+// CHECK:   [[ADD:%.+]] = arith.addf %[[ARG3]], %[[ARG4]] : f32
 // CHECK:   linalg.yield [[ADD]] : f32
 // CHECK: } -> tensor<1x5x5x33xf32>
 %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) { pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1] } : (tensor<1x7x5x3xf32>, tensor<3x1x3x11xf32>, tensor<33xf32>) -> (tensor<1x5x5x33xf32>)
@@ -503,8 +506,8 @@ func.func @depthwise_conv_dyn(%arg0 : tensor<?x7x5x3xf32>, %arg1 : tensor<3x1x3x
 // CHECK: %[[DEPTH:.+]] = linalg.depthwise_conv_2d_nhwc_hwcm {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %arg1 : tensor<?x7x5x3xf32>, tensor<3x1x3x11xf32>) outs(%[[FILL]] : tensor<?x5x5x3x11xf32>)
 // CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[DEPTH]] {{\[}}[0], [1], [2], [3, 4]]
 // CHECK: %[[BIAS:.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, %[[COLLAPSED]] : tensor<33xf32>, tensor<?x5x5x33xf32>) outs(%[[OUT]] : tensor<?x5x5x33xf32>) {
-// CHECK:   ^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
-// CHECK:   %[[ADD:.+]] = arith.addf %arg3, %arg4 : f32
+// CHECK:   ^bb0(%[[ARG3:[0-9a-zA-Z_]+]]: f32, %[[ARG4:[0-9a-zA-Z_]+]]: f32, %[[ARG5:[0-9a-zA-Z_]+]]: f32):
+// CHECK:   %[[ADD:.+]] = arith.addf %[[ARG3]], %[[ARG4]] : f32
 // CHECK:   linalg.yield %[[ADD]] : f32
 // CHECK: } -> tensor<?x5x5x33xf32>
 %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) { pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1] } : (tensor<?x7x5x3xf32>, tensor<3x1x3x11xf32>, tensor<33xf32>) -> (tensor<?x5x5x33xf32>)
@@ -525,8 +528,8 @@ func.func @depthwise_conv_strides(%arg0 : tensor<1x11x9x3xf32>, %arg1 : tensor<3
 // CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv_2d_nhwc_hwcm {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>} ins(%arg0, %arg1 : tensor<1x11x9x3xf32>, tensor<3x1x3x11xf32>) outs([[FILL]] : tensor<1x5x5x3x11xf32>)
 // CHECK: [[COLLAPSED:%.+]] = tensor.collapse_shape [[DEPTH]] {{\[}}[0], [1], [2], [3, 4]]
 // CHECK: [[BIAS:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, [[COLLAPSED]] : tensor<33xf32>, tensor<1x5x5x33xf32>) outs([[OUT]] : tensor<1x5x5x33xf32>) {
-// CHECK:   ^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
-// CHECK:   [[ADD:%.+]] = arith.addf %arg3, %arg4 : f32
+// CHECK:   ^bb0(%[[ARG3:[0-9a-zA-Z_]+]]: f32, %[[ARG4:[0-9a-zA-Z_]+]]: f32, %[[ARG5:[0-9a-zA-Z_]+]]: f32):
+// CHECK:   [[ADD:%.+]] = arith.addf %[[ARG3]], %[[ARG4]] : f32
 // CHECK:   linalg.yield [[ADD]] : f32
 // CHECK: } -> tensor<1x5x5x33xf32>
 %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) { pad = [0, 0, 0, 0], stride = [2, 2], dilation = [1, 1] } : (tensor<1x11x9x3xf32>, tensor<3x1x3x11xf32>, tensor<33xf32>) -> (tensor<1x5x5x33xf32>)
@@ -553,8 +556,8 @@ func.func @depthwise_conv_quant(%arg0 : tensor<1x12x12x4xi8>, %arg1 : tensor<3x3
 // CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv_2d_nhwc_hwcm_q {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins([[PAD]], %arg1, [[C128]], [[C42]] : tensor<1x14x14x4xi8>, tensor<3x3x4x128xi8>, i32, i32) outs([[FILL]] : tensor<1x12x12x4x128xi32>)
 // CHECK: [[COLLAPSED:%.+]] = tensor.collapse_shape [[DEPTH]] {{\[}}[0], [1], [2], [3, 4]]
 // CHECK: [[BIAS:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, [[COLLAPSED]] : tensor<512xi32>, tensor<1x12x12x512xi32>) outs([[OUT]] : tensor<1x12x12x512xi32>) {
-// CHECK:   ^bb0(%arg3: i32, %arg4: i32, %arg5: i32):
-// CHECK:   [[ADD:%.+]] = arith.addi %arg3, %arg4 : i32
+// CHECK:   ^bb0(%[[ARG3:[0-9a-zA-Z_]+]]: i32, %[[ARG4:[0-9a-zA-Z_]+]]: i32, %[[ARG5:[0-9a-zA-Z_]+]]: i32):
+// CHECK:   [[ADD:%.+]] = arith.addi %[[ARG3]], %[[ARG4]] : i32
 // CHECK:   linalg.yield [[ADD]] : i32
 // CHECK: } -> tensor<1x12x12x512xi32>
 %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [1, 1, 1, 1], quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 42>, stride = [1, 1], dilation = [1, 1] } : (tensor<1x12x12x4xi8>, tensor<3x3x4x128xi8>, tensor<512xi32>) -> tensor<1x12x12x512xi32>
@@ -577,8 +580,8 @@ func.func @depthwise_conv_quant_dilations(%arg0 : tensor<1x14x14x4xi8>, %arg1 :
 // CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv_2d_nhwc_hwcm_q {dilations = dense<2> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %arg1, [[C128]], [[C42]] : tensor<1x14x14x4xi8>, tensor<3x3x4x128xi8>, i32, i32) outs([[FILL]] : tensor<1x10x10x4x128xi32>)
 // CHECK: [[COLLAPSED:%.+]] = tensor.collapse_shape [[DEPTH]] {{\[}}[0], [1], [2], [3, 4]]
 // CHECK: [[BIAS:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, [[COLLAPSED]] : tensor<512xi32>, tensor<1x10x10x512xi32>) outs([[OUT]] : tensor<1x10x10x512xi32>) {
-// CHECK:   ^bb0(%arg3: i32, %arg4: i32, %arg5: i32):
-// CHECK:   [[ADD:%.+]] = arith.addi %arg3, %arg4 : i32
+// CHECK:   ^bb0(%[[ARG3:[0-9a-zA-Z_]+]]: i32, %[[ARG4:[0-9a-zA-Z_]+]]: i32, %[[ARG5:[0-9a-zA-Z_]+]]: i32):
+// CHECK:   [[ADD:%.+]] = arith.addi %[[ARG3]], %[[ARG4]] : i32
 // CHECK:   linalg.yield [[ADD]] : i32
 // CHECK: } -> tensor<1x10x10x512xi32>
 %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 42>, stride = [1, 1], dilation = [2, 2] } : (tensor<1x14x14x4xi8>, tensor<3x3x4x128xi8>, tensor<512xi32>) -> tensor<1x10x10x512xi32>
@@ -592,7 +595,7 @@ func.func @depthwise_conv2d_dyn_w_h(%arg0: tensor<2x?x?x3xf32>, %arg1: tensor<3x
 // CHECK: arith.muli
 // CHECK: arith.divui
 // CHECK: %[[PADDED:.+]] = tensor.pad %arg0 low[0, 1, 3, 0] high[0, 2, 4, 0] {
-// CHECK:   ^bb0(%arg3: index, %arg4: index, %arg5: index, %arg6: index):
+// CHECK:   ^bb0(%[[ARG3:[0-9a-zA-Z_]+]]: index, %[[ARG4:[0-9a-zA-Z_]+]]: index, %[[ARG5:[0-9a-zA-Z_]+]]: index, %[[ARG6:[0-9a-zA-Z_]+]]: index):
 // CHECK:   tensor.yield %cst : f32
 // CHECK: } : tensor<2x?x?x3xf32> to tensor<2x?x?x3xf32>
 // CHECK: %[[CONV:.+]] = linalg.depthwise_conv_2d_nhwc_hwcm {dilations = dense<[2, 1]> : tensor<2xi64>, strides = dense<[1, 2]> : tensor<2xi64>} ins(%[[PADDED]], %arg1 : tensor<2x?x?x3xf32>, tensor<3x6x3x5xf32>) outs(%{{.*}} : tensor<2x?x?x3x5xf32>) -> tensor<2x?x?x3x5xf32>
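All of the updates in this file follow one FileCheck recipe: stop hard-coding the positional `%argN` names and instead capture whatever name the printer assigns. In isolation (hypothetical lines, not taken from the diff):

  // CHECK: ^bb0(%[[ARG:[0-9a-zA-Z_]+]]: f32,
  // CHECK: arith.addf %[[ARG]],

`%[[ARG:regex]]` binds the actual SSA name on first match and `%[[ARG]]` reuses it, so the checks pass regardless of whether the block argument prints as `%arg3` or as the new `%in`.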
