
Commit 9309168

Address comments
Signed-off-by: hanhanW <[email protected]>
1 parent 199f9f0 commit 9309168

3 files changed: +30, -7 lines

mlir/include/mlir/Dialect/Linalg/IR/LinalgRelayoutOps.td

Lines changed: 4 additions & 1 deletion
@@ -153,7 +153,10 @@ def Linalg_PackOp : Linalg_RelayoutOp<"pack", [
     - If absent, it assumes the tile perfectly divides the dimension.
     - If present, it will pad along high dimensions (high-padding) to make the
       tile complete. Note that it is not allowed to have artificial padding that
-      is not strictly required by linalg.pack.
+      is not strictly required by linalg.pack (i.e., padding past what is needed
+      to complete the last tile along each packed dimension). It is UB if extra
+      padding is requested for dynamic cases. For static cases, it is caught
+      by the verifier.
 
     Example:
     ```mlir

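A minimal sketch of the rule described above (the function name and shapes are illustrative, not part of the commit): packing a tensor<9xf32> with an inner tile of 8 needs exactly ceil(9 / 8) = 2 outer tiles, so tensor<2x8xf32> is the only packed shape without artificial padding; padding out to tensor<3x8xf32>, as in the new negative test below, is what the verifier now rejects.

```mlir
// Legal: 9 elements with inner_tiles = [8] gives 2 tiles and 7 elements of
// high-padding to complete the last tile. Any larger outer dimension would be
// artificial padding, which the verifier rejects for static shapes.
func.func @pack_minimal_padding(%src: tensor<9xf32>, %dst: tensor<2x8xf32>) -> tensor<2x8xf32> {
  %pad = arith.constant 0.0 : f32
  %0 = linalg.pack %src padding_value(%pad : f32)
      inner_dims_pos = [0] inner_tiles = [8] into %dst
      : tensor<9xf32> -> tensor<2x8xf32>
  return %0 : tensor<2x8xf32>
}
```
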
mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp

Lines changed: 3 additions & 3 deletions
@@ -4699,9 +4699,9 @@ static LogicalResult commonVerifierPackAndUnPackOp(OpTy packOrUnPack) {
   }
   if (failed(verifyCompatibleShape(expectedPackedType.getShape(),
                                    packedType.getShape()))) {
-    return op->emitError("the shape of unpacked domain value is not large "
-                         "enough to hold the packed data. Expected at least ")
-           << expectedPackedType << ", got " << packedType;
+    return op->emitError("expected ")
+           << expectedPackedType << " for the unpacked domain value, got "
+           << packedType;
   }
   return success();
 }

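Note that the check above only fires when the mismatch is statically provable, since verifyCompatibleShape treats dynamic dimensions as compatible. As a hedged illustration (names and shapes are hypothetical, not from the commit), a dynamically shaped pack like the following passes verification, and requesting artificial padding through it is UB per the documentation change above.

```mlir
// Verifies even if, at runtime, %dst has more outer tiles than needed to hold
// %src: with dynamic shapes the artificial-padding check cannot fire, and any
// extra padding is UB per the linalg.pack documentation.
func.func @pack_dynamic_padding(%src: tensor<?xf32>, %dst: tensor<?x8xf32>) -> tensor<?x8xf32> {
  %pad = arith.constant 0.0 : f32
  %0 = linalg.pack %src padding_value(%pad : f32)
      inner_dims_pos = [0] inner_tiles = [8] into %dst
      : tensor<?xf32> -> tensor<?x8xf32>
  return %0 : tensor<?x8xf32>
}
```
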
mlir/test/Dialect/Linalg/invalid.mlir

Lines changed: 23 additions & 3 deletions
@@ -1825,26 +1825,46 @@ func.func @unpack_invalid_outer_dims_perm(%source: tensor<128x256xf32>, %dest: t
 
 // -----
 
+func.func @pack_with_artificial_padding(%input: tensor<9xf32>, %output: tensor<3x8xf32>) -> tensor<3x8xf32> {
+  %cst = arith.constant 0.0 : f32
+  // expected-error@+1 {{expected 'tensor<2x8xf32>' for the unpacked domain value, got 'tensor<3x8xf32>'}}
+  %0 = linalg.pack %input padding_value(%cst : f32) inner_dims_pos = [0]
+      inner_tiles = [8] into %output
+      : tensor<9xf32> -> tensor<3x8xf32>
+  return %0 : tensor<3x8xf32>
+}
+
+// -----
+
 // The outer dims in the output tensor are incorrectly/unexpectedly transposed.
 // This could be fixed by adding `outer_dims_perm = [1, 0]` (the default value assumes no transpose).
 func.func @pack_invalid_result_shape(%input: tensor<256x128xf32>, %output: tensor<4x16x32x16xf32>) -> tensor<4x16x32x16xf32> {
-  // expected-error@+1 {{the shape of unpacked domain value is not large enough to hold the packed data. Expected at least 'tensor<16x4x32x16xf32>', got 'tensor<4x16x32x16xf32>'}}
+  // expected-error@+1 {{expected 'tensor<16x4x32x16xf32>' for the unpacked domain value, got 'tensor<4x16x32x16xf32>'}}
   %0 = linalg.pack %input inner_dims_pos = [1, 0] inner_tiles = [32, 16] into %output : tensor<256x128xf32> -> tensor<4x16x32x16xf32>
   return %0 : tensor<4x16x32x16xf32>
 }
 
 // -----
 
 func.func @pack_invalid(%input: tensor<256x128xf32>, %output: tensor<8x7x16x32xf32>) -> tensor<8x7x16x32xf32> {
-  // expected-error@+1 {{the shape of unpacked domain value is not large enough to hold the packed data. Expected at least 'tensor<8x8x16x32xf32>', got 'tensor<8x7x16x32xf32>'}}
+  // expected-error@+1 {{expected 'tensor<8x8x16x32xf32>' for the unpacked domain value, got 'tensor<8x7x16x32xf32>'}}
   %0 = linalg.pack %input inner_dims_pos = [1, 0] inner_tiles = [16, 32] into %output : tensor<256x128xf32> -> tensor<8x7x16x32xf32>
   return %0 : tensor<8x7x16x32xf32>
 }
 
 // -----
 
+func.func @unpack_with_slicing_tiles(%input: tensor<3x8xf32>, %output: tensor<9xf32>) -> tensor<9xf32> {
+  // expected-error@+1 {{expected 'tensor<2x8xf32>' for the unpacked domain value, got 'tensor<3x8xf32>'}}
+  %0 = linalg.unpack %input inner_dims_pos = [0] inner_tiles = [8] into %output
+      : tensor<3x8xf32> -> tensor<9xf32>
+  return %0 : tensor<9xf32>
+}
+
+// -----
+
 func.func @unpack_invalid(%output: tensor<256x128xf32>, %input: tensor<8x8x4x32xf32>) -> tensor<256x128xf32> {
-  // expected-error@+1 {{the shape of unpacked domain value is not large enough to hold the packed data. Expected at least 'tensor<8x32x4x32xf32>', got 'tensor<8x8x4x32xf32>'}}
+  // expected-error@+1 {{expected 'tensor<8x32x4x32xf32>' for the unpacked domain value, got 'tensor<8x8x4x32xf32>'}}
   %0 = linalg.unpack %input inner_dims_pos = [1, 0] inner_tiles = [4, 32] into %output : tensor<8x8x4x32xf32> -> tensor<256x128xf32>
   return %0 : tensor<256x128xf32>
 }
