From f32a4e67bb8eb825b156a9342d5d467f8e07fd00 Mon Sep 17 00:00:00 2001
From: Hariprasad Ravishankar
Date: Mon, 27 Oct 2025 14:49:30 -0400
Subject: [PATCH] [TOSA] Notify failure during torch-to-tosa when
 AtenEmptyMemoryFormat receives a tensor with a dimension of size zero

---
 lib/Conversion/TorchToTosa/TorchToTosa.cpp |  5 +++++
 test/Conversion/TorchToTosa/basic.mlir     | 14 ++++++++++++++
 2 files changed, 19 insertions(+)

diff --git a/lib/Conversion/TorchToTosa/TorchToTosa.cpp b/lib/Conversion/TorchToTosa/TorchToTosa.cpp
index 0bc93f711ad6..50056bba5b96 100644
--- a/lib/Conversion/TorchToTosa/TorchToTosa.cpp
+++ b/lib/Conversion/TorchToTosa/TorchToTosa.cpp
@@ -6435,6 +6435,11 @@ class ConvertAtenConstPatternOp : public OpConversionPattern<AtenOpT> {
     for (auto s : shape)
       size *= s;
 
+    if (size == 0) {
+      return rewriter.notifyMatchFailure(
+          op, "Shape must not have a dimension of size zero");
+    }
+
     SmallVector<int32_t> values(size, fillVal);
     auto constOp =
         tosa::getConstTensor<int32_t>(rewriter, op, values, shape).value();
diff --git a/test/Conversion/TorchToTosa/basic.mlir b/test/Conversion/TorchToTosa/basic.mlir
index d100fe9dcfde..4eee8987f089 100644
--- a/test/Conversion/TorchToTosa/basic.mlir
+++ b/test/Conversion/TorchToTosa/basic.mlir
@@ -4370,3 +4370,17 @@ func.func @torch.aten.linear$f16(%arg0: !torch.vtensor<[2,4],f16>, %arg1: !torch.vtensor<[3,4],f16>, %arg2: !torch.vtensor<[3],f16>) -> !torch.vtensor<[2,3],f16> {
   %0 = torch.aten.linear %arg0, %arg1, %arg2 : !torch.vtensor<[2,4],f16>, !torch.vtensor<[3,4],f16>, !torch.vtensor<[3],f16> -> !torch.vtensor<[2,3],f16>
   return %0 : !torch.vtensor<[2,3],f16>
 }
+
+// -----
+func.func @torch.aten.empty.memory_format() -> !torch.vtensor<[1,0,256],f32> {
+  %c1 = torch.constant.int 1
+  %c0 = torch.constant.int 0
+  %c256 = torch.constant.int 256
+  %2452 = torch.prim.ListConstruct %c1, %c0, %c256 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
+  %none = torch.constant.none
+  %cpu = torch.constant.device "cpu"
+  %false = torch.constant.bool false
+  // expected-error @below {{failed to legalize operation 'torch.aten.empty.memory_format' that was explicitly marked illegal}}
+  %out = torch.aten.empty.memory_format %2452, %none, %none, %cpu, %false, %none : !torch.list<int>, !torch.none, !torch.none, !torch.Device, !torch.bool, !torch.none -> !torch.vtensor<[1,0,256],f32>
+  return %out : !torch.vtensor<[1,0,256],f32>
+}