From c369bdd81b59ab3d7f143485ef8ecd535830f40a Mon Sep 17 00:00:00 2001
From: Tina Jung
Date: Mon, 4 Sep 2023 09:09:34 +0100
Subject: [PATCH 1/4] Support for zero-sized dimensions in aten.empty.memory_format

---
 lib/Conversion/TorchToTosa/TorchToTosa.cpp | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/lib/Conversion/TorchToTosa/TorchToTosa.cpp b/lib/Conversion/TorchToTosa/TorchToTosa.cpp
index 4e19c700482b..12722435c2c0 100644
--- a/lib/Conversion/TorchToTosa/TorchToTosa.cpp
+++ b/lib/Conversion/TorchToTosa/TorchToTosa.cpp
@@ -5393,6 +5393,12 @@ LogicalResult ConvertAtenOp<AtenEmptyMemoryFormatOp>::matchAndRewrite(
       typeConverter->convertType(op.getType()).template cast<TensorType>();
 
   DenseElementsAttr emptyVal;
+  // Create an empty tensor if all dimensions are zero
+  if (llvm::all_of(resultType.getShape(), [](int dimSize){ return dimSize == 0;})) {
+    rewriter.replaceOpWithNewOp<tosa::ConstOp>(op, resultType, emptyVal);
+    return success();
+  }
+
   if (op.getDtype().getType().template isa<Torch::NoneType>()) {
     emptyVal = DenseFPElementsAttr::get(resultType, {0.0F});
   } else {

From 0fe676d878a90a9d998dbbe47a87879012965d06 Mon Sep 17 00:00:00 2001
From: Tina Jung
Date: Wed, 6 Sep 2023 14:36:51 +0200
Subject: [PATCH 2/4] Add match failure for 0-sized tensor dimensions

---
 lib/Conversion/TorchToTosa/TorchToTosa.cpp | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/lib/Conversion/TorchToTosa/TorchToTosa.cpp b/lib/Conversion/TorchToTosa/TorchToTosa.cpp
index 12722435c2c0..2b639d9522b4 100644
--- a/lib/Conversion/TorchToTosa/TorchToTosa.cpp
+++ b/lib/Conversion/TorchToTosa/TorchToTosa.cpp
@@ -5392,13 +5392,15 @@ LogicalResult ConvertAtenOp<AtenEmptyMemoryFormatOp>::matchAndRewrite(
   auto resultType =
       typeConverter->convertType(op.getType()).template cast<TensorType>();
 
-  DenseElementsAttr emptyVal;
-  // Create an empty tensor if all dimensions are zero
-  if (llvm::all_of(resultType.getShape(), [](int dimSize){ return dimSize == 0;})) {
-    rewriter.replaceOpWithNewOp<tosa::ConstOp>(op, resultType, emptyVal);
-    return success();
+  // TOSA does not allow empty dimensions, so we can't lower this while
+  // preserving the shape.
+  if (llvm::any_of(resultType.getShape(),
+                   [](int dimSize) { return dimSize == 0; })) {
+    return rewriter.notifyMatchFailure(
+        op, "Cannot lower tensors with 0-sized dimensions to TOSA.");
   }
 
+  DenseElementsAttr emptyVal;
   if (op.getDtype().getType().template isa<Torch::NoneType>()) {
     emptyVal = DenseFPElementsAttr::get(resultType, {0.0F});
   } else {

From 2656928332241844f2bd8976ebd3f4697c1b20de Mon Sep 17 00:00:00 2001
From: Tina Jung
Date: Fri, 8 Sep 2023 16:36:13 +0200
Subject: [PATCH 3/4] Add test case where some dimensions are zero

---
 e2e_testing/xfail_sets.py        |  1 +
 .../test_suite/constant_alloc.py | 19 +++++++++++++++++++
 2 files changed, 20 insertions(+)

diff --git a/e2e_testing/xfail_sets.py b/e2e_testing/xfail_sets.py
index 204632619deb..71d07cf68b7a 100644
--- a/e2e_testing/xfail_sets.py
+++ b/e2e_testing/xfail_sets.py
@@ -696,6 +696,7 @@
     "EmptyModule_falsePinMemory",
     "EmptyModule_int",
     "EmptyModule_float",
+    "EmptyModule_sizeZeroDim",
     "NewEmptyModuleBool_basic",
     "NewEmptyModuleDefaultDtype_basic",
     "NewEmptyModuleFalsePinMemory_basic",
diff --git a/python/torch_mlir_e2e_test/test_suite/constant_alloc.py b/python/torch_mlir_e2e_test/test_suite/constant_alloc.py
index 1b92c8f17135..b3bd2b08287a 100644
--- a/python/torch_mlir_e2e_test/test_suite/constant_alloc.py
+++ b/python/torch_mlir_e2e_test/test_suite/constant_alloc.py
@@ -308,6 +308,25 @@ def EmptyModule_falsePinMemory(module, tu: TestUtils):
     module.forward()
 
 
+class EmptySizeZeroDimTensorModule(torch.nn.Module):
+
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args([
+        None,
+    ])
+    def forward(self):
+        return torch.empty((3, 0, 4),
+                           memory_format=torch.contiguous_format)
+
+
+@register_test_case(module_factory=lambda: EmptySizeZeroDimTensorModule())
+def EmptyModule_sizeZeroDim(module, tu: TestUtils):
+    module.forward()
+
+
 # ==============================================================================

From bf896be9d330e70852df449c0de80732f858bd87 Mon Sep 17 00:00:00 2001
From: Tina Jung
Date: Mon, 11 Sep 2023 14:22:39 +0200
Subject: [PATCH 4/4] Update fail sets to account for stable release

---
 e2e_testing/xfail_sets.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/e2e_testing/xfail_sets.py b/e2e_testing/xfail_sets.py
index 71d07cf68b7a..aedcb113330b 100644
--- a/e2e_testing/xfail_sets.py
+++ b/e2e_testing/xfail_sets.py
@@ -302,6 +302,12 @@
     "FakeQuantizePerTensorAffineCachemaskModule_basic",
 }
 
+# Tests that pass on nightly already, but are still failing on latest stable.
+if torch_version_for_comparison() < version.parse("2.1.0.dev"):
+    TORCHDYNAMO_XFAIL_SET.update({
+        "EmptyModule_sizeZeroDim",
+    })
+
 TORCHDYNAMO_CRASHING_SET = {
     # No upstream decompositions.
     # %6:4 = torch.operator "aten._embedding_bag_forward_only"(%1, %3, %5, %false, %int0, %false, %none, %false, %int-1) : (!torch.tensor<*,f32>, !torch.tensor<*,si64>, !torch.tensor<*,si64>, !torch.bool, !torch.int, !torch.bool, !torch.none, !torch.bool, !torch.int) -> (!torch.tensor, !torch.tensor, !torch.tensor, !torch.tensor)