35 | 35 | dimOrdering = affine_map<(i,j) -> (j,i)>
36 | 36 | }>
37 | 37 |
| 38 | +#BlockRow = #sparse_tensor.encoding<{ |
| 39 | + dimLevelType = [ "compressed", "dense" ] |
| 40 | +}> |
| 41 | + |
| 42 | +#BlockCol = #sparse_tensor.encoding<{ |
| 43 | + dimLevelType = [ "compressed", "dense" ], |
| 44 | + dimOrdering = affine_map<(i,j) -> (j,i)> |
| 45 | +}> |
| 46 | + |
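The two encodings added above share the same level types but differ in dimension ordering: `#BlockRow` compresses the outer (row) dimension and keeps every retained row dense, while `#BlockCol` applies the identical scheme to the columns via the `(i,j) -> (j,i)` ordering. As a rough illustration only (a minimal Python sketch of the layout with hypothetical helper names, not the MLIR sparse runtime's actual packing code), the pointers/indices/values buffers inspected later in this test can be thought of as:

```python
def pack_compressed_dense(matrix):
    """Sketch of a 'compressed, dense' 2-D layout (#BlockRow above):
    only rows containing a nonzero are kept, but each kept row is
    stored in full, zeros included."""
    pointers, indices, values = [0], [], []
    for i, row in enumerate(matrix):
        if any(v != 0 for v in row):
            indices.append(i)      # index of the stored (nonempty) row
            values.extend(row)     # the whole dense row
    pointers.append(len(indices))  # one pointer pair: the compressed level has a single parent
    return pointers, indices, values

def pack_block_col(matrix):
    """#BlockCol is the same packing applied to the transpose, which is
    what the (i,j) -> (j,i) dimOrdering expresses."""
    return pack_compressed_dense([list(col) for col in zip(*matrix)])
```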
38 | 47 | //
39 | 48 | // Integration test that looks "under the hood" of sparse storage schemes.
40 | 49 | //
@@ -74,6 +83,8 @@ module {
74 | 83 | %2 = sparse_tensor.convert %t : tensor<10x8xf64> to tensor<10x8xf64, #DCSR>
75 | 84 | %3 = sparse_tensor.convert %t : tensor<10x8xf64> to tensor<10x8xf64, #CSC>
76 | 85 | %4 = sparse_tensor.convert %t : tensor<10x8xf64> to tensor<10x8xf64, #DCSC>
| 86 | + %x = sparse_tensor.convert %t : tensor<10x8xf64> to tensor<10x8xf64, #BlockRow> |
| 87 | + %y = sparse_tensor.convert %t : tensor<10x8xf64> to tensor<10x8xf64, #BlockCol> |
77 | 88 |
78 | 89 | //
79 | 90 | // Inspect storage scheme of Dense.
@@ -192,6 +203,53 @@ module {
192 | 203 | %38 = vector.transfer_read %37[%c0], %d0: memref<?xf64>, vector<17xf64>
193 | 204 | vector.print %38 : vector<17xf64>
194 | 205 |
| 206 | + // |
| 207 | + // Inspect storage scheme of BlockRow. |
| 208 | + // |
| 209 | + // pointers(0) |
| 210 | + // indices(0) |
| 211 | + // values |
| 212 | + // |
| 213 | + // CHECK: ( 0, 8 ) |
| 214 | + // CHECK: ( 0, 2, 3, 4, 5, 6, 7, 9 ) |
| 215 | + // CHECK: ( 1, 0, 2, 0, 0, 0, 0, 3, 0, 0, 4, 0, 0, 0, 0, 0, |
| 216 | + // CHECK-SAME: 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, |
| 217 | + // CHECK-SAME: 0, 7, 8, 0, 0, 0, 0, 9, 0, 0, 10, 0, 0, 0, 11, 12, |
| 218 | + // CHECK-SAME: 0, 13, 14, 0, 0, 0, 15, 16, 0, 0, 0, 0, 0, 0, 17, 0 ) |
| 219 | + // |
| 220 | + %39 = sparse_tensor.pointers %x, %c0 : tensor<10x8xf64, #BlockRow> to memref<?xindex> |
| 221 | + %40 = vector.transfer_read %39[%c0], %c0: memref<?xindex>, vector<2xindex> |
| 222 | + vector.print %40 : vector<2xindex> |
| 223 | + %41 = sparse_tensor.indices %x, %c0 : tensor<10x8xf64, #BlockRow> to memref<?xindex> |
| 224 | + %42 = vector.transfer_read %41[%c0], %c0: memref<?xindex>, vector<8xindex> |
| 225 | + vector.print %42 : vector<8xindex> |
| 226 | + %43 = sparse_tensor.values %x : tensor<10x8xf64, #BlockRow> to memref<?xf64> |
| 227 | + %44 = vector.transfer_read %43[%c0], %d0: memref<?xf64>, vector<64xf64> |
| 228 | + vector.print %44 : vector<64xf64> |
| 229 | + |
| 230 | + // |
| 231 | + // Inspect storage scheme of BlockCol. |
| 232 | + // |
| 233 | + // pointers(0) |
| 234 | + // indices(0) |
| 235 | + // values |
| 236 | + // |
| 237 | + // CHECK: ( 0, 7 ) |
| 238 | + // CHECK: ( 0, 1, 2, 3, 4, 6, 7 ) |
| 239 | + // CHECK: ( 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 13, 0, 0, 2, 0, 4, 0, |
| 240 | + // CHECK-SAME: 0, 8, 10, 14, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, |
| 241 | + // CHECK-SAME: 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 15, 0, 17, 3, 0, 0, 0, 0, 9, 12, 16, 0, 0 ) |
| 242 | + // |
| 243 | + %45 = sparse_tensor.pointers %y, %c0 : tensor<10x8xf64, #BlockCol> to memref<?xindex> |
| 244 | + %46 = vector.transfer_read %45[%c0], %c0: memref<?xindex>, vector<2xindex> |
| 245 | + vector.print %46 : vector<2xindex> |
| 246 | + %47 = sparse_tensor.indices %y, %c0 : tensor<10x8xf64, #BlockCol> to memref<?xindex> |
| 247 | + %48 = vector.transfer_read %47[%c0], %c0: memref<?xindex>, vector<7xindex> |
| 248 | + vector.print %48 : vector<7xindex> |
| 249 | + %49 = sparse_tensor.values %y : tensor<10x8xf64, #BlockCol> to memref<?xf64> |
| 250 | + %50 = vector.transfer_read %49[%c0], %d0: memref<?xf64>, vector<70xf64> |
| 251 | + vector.print %50 : vector<70xf64> |
| 252 | + |
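The BlockCol side reads the same way: pointers(0) = ( 0, 7 ) because only seven columns contain a nonzero (column 5 is all zero), and values holds 7 x 10 = 70 doubles laid out column-major per the (j,i) ordering. Continuing the hypothetical sketch with the matrix `t` transcribed above:

```python
ptrs, idxs, vals = pack_block_col(t)   # same packing, applied to the transpose of t
assert ptrs == [0, 7]                  # CHECK: ( 0, 7 )
assert idxs == [0, 1, 2, 3, 4, 6, 7]   # column 5 is all zero, so it is skipped
assert len(vals) == 7 * 10             # matches vector<70xf64> above
```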
195 | 253 | return
196 | 254 | }
197 | 255 | }