diff --git a/backends/arm/scripts/parse_test_names.py b/backends/arm/scripts/parse_test_names.py index 8aabf7c2c59..46cf3e17a73 100644 --- a/backends/arm/scripts/parse_test_names.py +++ b/backends/arm/scripts/parse_test_names.py @@ -5,7 +5,15 @@ from executorch.exir.dialects.edge.spec.utils import SAMPLE_INPUT # Add edge ops which we lower but which are not included in exir/dialects/edge/edge.yaml here. -CUSTOM_EDGE_OPS = ["linspace.default", "eye.default"] +CUSTOM_EDGE_OPS = [ + "linspace.default", + "eye.default", + "hardsigmoid.default", + "hardswish.default", + "linear.default", + "maximum.default", + "adaptive_avg_pool2d.default", +] ALL_EDGE_OPS = SAMPLE_INPUT.keys() | CUSTOM_EDGE_OPS # Add all targets and TOSA profiles we support here. diff --git a/backends/arm/test/common.py b/backends/arm/test/common.py index 57606e51f47..3f90c8c056c 100644 --- a/backends/arm/test/common.py +++ b/backends/arm/test/common.py @@ -259,17 +259,15 @@ def decorator_func(func): raise RuntimeError( "xfail info needs to be str, or tuple[str, type[Exception]]" ) - pytest_param = pytest.param( - test_parameters, - id=id, - marks=pytest.mark.xfail( - reason=reason, raises=raises, strict=strict - ), + # Set up our fail marker + marker = ( + pytest.mark.xfail(reason=reason, raises=raises, strict=strict), ) else: - pytest_param = pytest.param(test_parameters, id=id) - pytest_testsuite.append(pytest_param) + marker = () + pytest_param = pytest.param(test_parameters, id=id, marks=marker) + pytest_testsuite.append(pytest_param) return pytest.mark.parametrize(arg_name, pytest_testsuite)(func) return decorator_func diff --git a/backends/arm/test/conftest.py b/backends/arm/test/conftest.py index db097e9d7d9..2d247f7bd42 100644 --- a/backends/arm/test/conftest.py +++ b/backends/arm/test/conftest.py @@ -12,12 +12,6 @@ import pytest -try: - import tosa_tools.v0_80.tosa_reference_model as tosa_reference_model -except ImportError: - logging.warning("tosa_reference_model not found, can't run reference model tests") - tosa_reference_model = None - """ This file contains the pytest hooks, fixtures etc. for the Arm test suite. """ @@ -50,10 +44,11 @@ def pytest_configure(config): if getattr(config.option, "fast_fvp", False): pytest._test_options["fast_fvp"] = config.option.fast_fvp # type: ignore[attr-defined] - # TODO: remove this flag once we have a way to run the reference model tests with Buck - pytest._test_options["tosa_ref_model"] = False # type: ignore[attr-defined] - if tosa_reference_model is not None: - pytest._test_options["tosa_ref_model"] = True # type: ignore[attr-defined] + if config.option.arm_run_tosa_version: + pytest._test_options["tosa_version"] = config.option.arm_run_tosa_version + + pytest._test_options["tosa_ref_model"] = True # type: ignore[attr-defined] + logging.basicConfig(level=logging.INFO, stream=sys.stdout) @@ -76,6 +71,7 @@ def try_addoption(*args, **kwargs): nargs="+", help="List of two files. Firstly .pt file. Secondly .json", ) + try_addoption("--arm_run_tosa_version", action="store", default="0.80") def pytest_sessionstart(session): diff --git a/backends/arm/test/ops/test_abs.py b/backends/arm/test/ops/test_abs.py index 481c7d5ed0d..ed7e616e946 100644 --- a/backends/arm/test/ops/test_abs.py +++ b/backends/arm/test/ops/test_abs.py @@ -1,125 +1,68 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. -# Copyright 2025 Arm Limited and/or its affiliates. # All rights reserved. +# Copyright 2025 Arm Limited and/or its affiliates. 
# # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest from typing import Tuple -import pytest - import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - - -class TestAbs(unittest.TestCase): - class Abs(torch.nn.Module): - test_parameters = [ - (torch.zeros(5),), - (torch.full((5,), -1, dtype=torch.float32),), - (torch.ones(5) * -1,), - (torch.randn(8),), - (torch.randn(2, 3, 4),), - (torch.randn(1, 2, 3, 4),), - (torch.normal(mean=0, std=10, size=(2, 3, 4)),), - ] - - def forward(self, x): - return torch.abs(x) - - def _test_abs_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.abs.default": 1}) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["torch.ops.aten.abs.default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_abs_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check_count({"torch.ops.aten.abs.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, qtol=1) - ) - - def _test_abs_ethosu_BI_pipeline( - self, - compile_spec: list[CompileSpec], - module: torch.nn.Module, - test_data: Tuple[torch.Tensor], - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.abs.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - @parameterized.expand(Abs.test_parameters) - def test_abs_tosa_MI(self, test_data: torch.Tensor): - test_data = (test_data,) - self._test_abs_tosa_MI_pipeline(self.Abs(), test_data) - - @parameterized.expand(Abs.test_parameters) - def test_abs_tosa_BI(self, test_data: torch.Tensor): - test_data = (test_data,) - self._test_abs_tosa_BI_pipeline(self.Abs(), test_data) - - @parameterized.expand(Abs.test_parameters) - @pytest.mark.corstone_fvp - def test_abs_u55_BI(self, test_data: torch.Tensor): - test_data = (test_data,) - self._test_abs_ethosu_BI_pipeline( - common.get_u55_compile_spec(), self.Abs(), test_data - ) - - @parameterized.expand(Abs.test_parameters) - @pytest.mark.corstone_fvp - def test_abs_u85_BI(self, test_data: torch.Tensor): - test_data = (test_data,) - self._test_abs_ethosu_BI_pipeline( - common.get_u85_compile_spec(), self.Abs(), test_data - ) +from executorch.backends.arm.test import common +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + 
TosaPipelineBI, + TosaPipelineMI, +) + +aten_op = "torch.ops.aten.abs.default" +exir_op = "executorch_exir_dialects_edge__ops_aten_abs_default" + +input_t1 = Tuple[torch.Tensor] # Input x + + +class Abs(torch.nn.Module): + test_parameters = { + "zeros": lambda: (torch.zeros(5),), + "full": lambda: (torch.full((5,), -1, dtype=torch.float32),), + "ones": lambda: (torch.ones(5) * -1,), + "randn_1d": lambda: (torch.randn(8),), + "randn_3d": lambda: (torch.randn(2, 3, 4),), + "randn_4d": lambda: (torch.randn(1, 2, 3, 4),), + "torch_normal": lambda: (torch.normal(mean=0, std=10, size=(2, 3, 4)),), + } + + def forward(self, x): + return torch.abs(x) + + +@common.parametrize("test_data", Abs.test_parameters) +def test_abs_tosa_MI(test_data: torch.Tensor): + pipeline = TosaPipelineMI[input_t1](Abs(), test_data(), aten_op, exir_op) + pipeline.run() + + +@common.parametrize("test_data", Abs.test_parameters) +def test_abs_tosa_BI(test_data: torch.Tensor): + pipeline = TosaPipelineBI[input_t1](Abs(), test_data(), aten_op, exir_op) + pipeline.run() + + +@common.parametrize("test_data", Abs.test_parameters) +@common.XfailIfNoCorstone300 +def test_abs_u55_BI(test_data: torch.Tensor): + pipeline = EthosU55PipelineBI[input_t1]( + Abs(), test_data(), aten_op, exir_op, run_on_fvp=True + ) + pipeline.run() + + +@common.parametrize("test_data", Abs.test_parameters) +@common.XfailIfNoCorstone320 +def test_abs_u85_BI(test_data: torch.Tensor): + pipeline = EthosU85PipelineBI[input_t1]( + Abs(), test_data(), aten_op, exir_op, run_on_fvp=True + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_add.py b/backends/arm/test/ops/test_add.py index 486e53c5f03..67833576886 100644 --- a/backends/arm/test/ops/test_add.py +++ b/backends/arm/test/ops/test_add.py @@ -10,18 +10,18 @@ import torch from executorch.backends.arm.arm_backend import get_tosa_spec from executorch.backends.arm.quantizer import arm_quantizer -from executorch.backends.arm.test import common +from executorch.backends.arm.test import common, conftest from executorch.backends.arm.test.tester.test_pipeline import ( EthosU55PipelineBI, EthosU85PipelineBI, TosaPipelineBI, TosaPipelineMI, ) +from executorch.backends.arm.tosa_specification import TosaSpecification from executorch.backends.xnnpack.test.tester import Quantize from torch.ao.quantization.observer import HistogramObserver from torch.ao.quantization.quantizer import QuantizationSpec - aten_op = "torch.ops.aten.add.Tensor" exir_op = "executorch_exir_dialects_edge__ops_aten_add_Tensor" @@ -33,11 +33,11 @@ def forward(self, x: torch.Tensor): return x + x test_data: list[input_t1] = { - "5d_float": (torch.FloatTensor([1, 2, 3, 5, 7]),), - "1d_ones": ((3 * torch.ones(8),)), - "1d_randn": (10 * torch.randn(8),), - "4d_ones_1": (torch.ones(1, 1, 4, 4),), - "4d_ones_2": (torch.ones(1, 3, 4, 2),), + "5d_float": lambda: (torch.FloatTensor([1, 2, 3, 5, 7]),), + "1d_ones": lambda: ((3 * torch.ones(8),)), + "1d_randn": lambda: (10 * torch.randn(8),), + "4d_ones_1": lambda: (torch.ones(1, 1, 4, 4),), + "4d_ones_2": lambda: (torch.ones(1, 3, 4, 2),), } @@ -49,14 +49,17 @@ def forward(self, x: torch.Tensor, y: torch.Tensor): return x + y test_data: list[input_t2] = { - "5d_float": ( + "5d_float": lambda: ( torch.FloatTensor([1, 2, 3, 5, 7]), (torch.FloatTensor([2, 1, 2, 1, 10])), ), - "4d_ones": (torch.ones(1, 10, 4, 6), torch.ones(1, 10, 4, 6)), - "4d_randn_1": (torch.randn(1, 1, 4, 4), torch.ones(1, 1, 4, 1)), - "4d_randn_2": (torch.randn(1, 3, 4, 4), torch.randn(1, 3, 4, 4)), - "4d_randn_big": (10000 * 
torch.randn(1, 1, 4, 4), torch.randn(1, 1, 4, 1)), + "4d_ones": lambda: (torch.ones(1, 10, 4, 6), torch.ones(1, 10, 4, 6)), + "4d_randn_1": lambda: (torch.randn(1, 1, 4, 4), torch.ones(1, 1, 4, 1)), + "4d_randn_2": lambda: (torch.randn(1, 3, 4, 4), torch.randn(1, 3, 4, 4)), + "4d_randn_big": lambda: ( + 10000 * torch.randn(1, 1, 4, 4), + torch.randn(1, 1, 4, 1), + ), } @@ -65,31 +68,35 @@ def forward(self, x: torch.Tensor, y: torch.Tensor): return x + y test_data: list[input_t2] = { - "3d_randn_diff_rank": (torch.randn(1, 4, 5), torch.randn(4, 1)), - "4d_randn_diff_rank": (torch.randn(1, 1, 4, 4), torch.randn(4, 1)), - "4d_randn_diff_rank_2": (torch.randn(4, 1), torch.randn(1, 1, 4, 5)), + "3d_randn_diff_rank": lambda: (torch.randn(1, 4, 5), torch.randn(4, 1)), + "4d_randn_diff_rank": lambda: (torch.randn(1, 1, 4, 4), torch.randn(4, 1)), + "4d_randn_diff_rank_2": lambda: (torch.randn(4, 1), torch.randn(1, 1, 4, 5)), } @common.parametrize("test_data", Add.test_data) -def test_add_tosa_MI(test_data: input_t1): - pipeline = TosaPipelineMI[input_t1](Add(), test_data, aten_op, exir_op) +def test_add_tensor_tosa_MI(test_data: input_t1): + pipeline = TosaPipelineMI[input_t1](Add(), test_data(), aten_op, exir_op) pipeline.run() @common.parametrize("test_data", Add.test_data) -def test_add_tosa_BI(test_data: input_t1): - pipeline = TosaPipelineBI[input_t1](Add(), test_data, aten_op, exir_op) +def test_add_tensor_tosa_BI(test_data: input_t1): + pipeline = TosaPipelineBI[input_t1](Add(), test_data(), aten_op, exir_op) pipeline.run() @common.parametrize("test_data", Add.test_data) -def test_add_i32_tosa_BI(test_data: input_t1): - pipeline = TosaPipelineBI[input_t1](Add(), test_data, aten_op, exir_op) - +def test_add_tensor_tosa_BI_i32(test_data: input_t1): + pipeline = TosaPipelineBI[input_t1](Add(), test_data(), aten_op, exir_op) + tosa_version = conftest.get_option("tosa_version") + tosa_profiles = { + "0.80": TosaSpecification.create_from_string("TOSA-0.80+BI"), + "1.0": TosaSpecification.create_from_string("TOSA-1.0+INT"), + } # Create a quantizer with int8 quantization on the input and output but int32 on everything else. 
quantizer = arm_quantizer.TOSAQuantizer( - get_tosa_spec(common.get_tosa_compile_spec("TOSA-0.80+BI")) + get_tosa_spec(common.get_tosa_compile_spec(tosa_profiles[tosa_version])) ) quantizer.set_io(arm_quantizer.get_symmetric_quantization_config()) observer_options = {"eps": 2**-16} @@ -117,59 +124,59 @@ def test_add_i32_tosa_BI(test_data: input_t1): @common.parametrize("test_data", Add.test_data) @common.XfailIfNoCorstone300 -def test_add_u55_BI(test_data: input_t1): +def test_add_tensor_u55_BI(test_data: input_t1): pipeline = EthosU55PipelineBI[input_t1]( - Add(), test_data, aten_op, exir_op, run_on_fvp=True + Add(), test_data(), aten_op, exir_op, run_on_fvp=True ) pipeline.run() @common.parametrize("test_data", Add.test_data) @common.XfailIfNoCorstone320 -def test_add_u85_BI(test_data: input_t1): +def test_add_tensor_u85_BI(test_data: input_t1): pipeline = EthosU85PipelineBI[input_t1]( - Add(), test_data, aten_op, exir_op, run_on_fvp=True + Add(), test_data(), aten_op, exir_op, run_on_fvp=True ) pipeline.run() @common.parametrize("test_data", Add2.test_data) -def test_add_2_tosa_MI(test_data: input_t2): - pipeline = TosaPipelineMI[input_t2](Add2(), test_data, aten_op, exir_op) +def test_add_tensor_tosa_MI_2(test_data: input_t2): + pipeline = TosaPipelineMI[input_t2](Add2(), test_data(), aten_op, exir_op) pipeline.run() @common.parametrize("test_data", Add3.test_data) -def test_add3_tosa_MI(test_data: input_t2): - pipeline = TosaPipelineMI[input_t2](Add3(), test_data, aten_op, exir_op) +def test_add_tensor_tosa_MI_3(test_data: input_t2): + pipeline = TosaPipelineMI[input_t2](Add3(), test_data(), aten_op, exir_op) pipeline.run() @common.parametrize("test_data", Add3.test_data) -def test_add3_tosa_BI(test_data: input_t2): - pipeline = TosaPipelineBI[input_t2](Add3(), test_data, aten_op, exir_op) +def test_add_tensor_tosa_BI_3(test_data: input_t2): + pipeline = TosaPipelineBI[input_t2](Add3(), test_data(), aten_op, exir_op) pipeline.run() @common.parametrize("test_data", Add2.test_data) -def test_add_2_tosa_BI(test_data: input_t2): - pipeline = TosaPipelineBI[input_t2](Add2(), test_data, aten_op, exir_op) +def test_add_tensor_tosa_BI_2(test_data: input_t2): + pipeline = TosaPipelineBI[input_t2](Add2(), test_data(), aten_op, exir_op) pipeline.run() @common.parametrize("test_data", Add2.test_data) @common.XfailIfNoCorstone300 -def test_add_2_u55_BI(test_data: input_t2): +def test_add_tensor_u55_BI_2(test_data: input_t2): pipeline = EthosU55PipelineBI[input_t2]( - Add2(), test_data, aten_op, exir_op, run_on_fvp=True + Add2(), test_data(), aten_op, exir_op, run_on_fvp=True ) pipeline.run() @common.parametrize("test_data", Add2.test_data) @common.XfailIfNoCorstone320 -def test_add_2_u85_BI(test_data: input_t2): +def test_add_tensor_u85_BI_2(test_data: input_t2): pipeline = EthosU85PipelineBI[input_t2]( - Add2(), test_data, aten_op, exir_op, run_on_fvp=True + Add2(), test_data(), aten_op, exir_op, run_on_fvp=True ) pipeline.run() diff --git a/backends/arm/test/ops/test_alias_copy.py b/backends/arm/test/ops/test_alias_copy.py index 66fa92bc445..44787fed950 100644 --- a/backends/arm/test/ops/test_alias_copy.py +++ b/backends/arm/test/ops/test_alias_copy.py @@ -30,10 +30,10 @@ class AliasCopy(torch.nn.Module): exir_op = "executorch_exir_dialects_edge__ops_aten_alias_copy_default" test_data: dict[input_t1] = { - "1d_ramp": (torch.arange(-16, 16, 0.2),), - "2d_ones": (torch.ones(5, 5),), - "3d_rand": (torch.rand(3, 5, 5),), - "4d_zeros": (torch.zeros(1, 10, 10, 10),), + "1d_ramp": lambda: 
(torch.arange(-16, 16, 0.2),), + "2d_ones": lambda: (torch.ones(5, 5),), + "3d_rand": lambda: (torch.rand(3, 5, 5),), + "4d_zeros": lambda: (torch.zeros(1, 10, 10, 10),), } def __init__(self): @@ -44,40 +44,40 @@ def forward(self, x: torch.Tensor): @common.parametrize("test_data", AliasCopy.test_data) -def test_alias_copy_tosa_MI(test_data: input_t1): +def test_alias_tosa_MI(test_data: input_t1): TosaPipelineMI[input_t1]( AliasCopy(), - test_data, + test_data(), AliasCopy.aten_op, AliasCopy.exir_op, ).run() @common.parametrize("test_data", AliasCopy.test_data) -def test_alias_copy_tosa_BI(test_data: input_t1): +def test_alias_tosa_BI(test_data: input_t1): TosaPipelineBI[input_t1]( AliasCopy(), - test_data, + test_data(), AliasCopy.aten_op, AliasCopy.exir_op, ).run() @common.parametrize("test_data", AliasCopy.test_data) -def test_alias_copy_u55_BI(test_data: input_t1): +def test_alias_u55_BI(test_data: input_t1): EthosU55PipelineBI[input_t1]( AliasCopy(), - test_data, + test_data(), AliasCopy.aten_op, AliasCopy.exir_op, ).run() @common.parametrize("test_data", AliasCopy.test_data) -def test_alias_copy_u85_BI(test_data: input_t1): +def test_alias_u85_BI(test_data: input_t1): EthosU85PipelineBI[input_t1]( AliasCopy(), - test_data, + test_data(), AliasCopy.aten_op, AliasCopy.exir_op, ).run() diff --git a/backends/arm/test/ops/test_amax.py b/backends/arm/test/ops/test_amax.py index b2639a5f108..0d1f4257b7b 100644 --- a/backends/arm/test/ops/test_amax.py +++ b/backends/arm/test/ops/test_amax.py @@ -30,11 +30,11 @@ def forward(self, x): return torch.amax(x, self.dim, self.keep_dims) test_data: Dict[str, input_t] = { - "rank_1_dim_0": ((torch.rand([10]),), 0, False), - "rank_2_dim_1_keep_dims": ((torch.rand([2, 2]),), (1,), True), - "rank_4_all_dim": ((torch.rand([1, 2, 5, 5]),), (0, 1, 2, 3), False), - "rank_4_0,3_keep_dims": ((torch.rand([1, 2, 2, 2]),), (0, 3), True), - "rank_4_mult_batches": ((torch.rand([2, 2, 2, 2]),), (0), True), + "rank_1_dim_0": lambda: ((torch.rand([10]),), 0, False), + "rank_2_dim_1_keep_dims": lambda: ((torch.rand([2, 2]),), (1,), True), + "rank_4_all_dim": lambda: ((torch.rand([1, 2, 5, 5]),), (0, 1, 2, 3), False), + "rank_4_0,3_keep_dims": lambda: ((torch.rand([1, 2, 2, 2]),), (0, 3), True), + "rank_4_mult_batches": lambda: ((torch.rand([2, 2, 2, 2]),), (0), True), } @@ -51,10 +51,10 @@ def forward(self, x): return x[0] test_data: Dict[str, input_t] = { - "rank_1_dim_0": ((torch.rand([10]),), 0), - "rank_2_dim_1": ((torch.rand([2, 2]),), 1), - "rank_4_dim_2": ((torch.rand([2, 2, 2, 2]),), 2), - "rank_4_dim_3": ((torch.rand([2, 2, 2, 2]),), 3), + "rank_1_dim_0": lambda: ((torch.rand([10]),), 0), + "rank_2_dim_1": lambda: ((torch.rand([2, 2]),), 1), + "rank_4_dim_2": lambda: ((torch.rand([2, 2, 2, 2]),), 2), + "rank_4_dim_3": lambda: ((torch.rand([2, 2, 2, 2]),), 3), } @@ -70,44 +70,26 @@ def forward(self, x): @common.parametrize("test_data", Amax.test_data) def test_amax_tosa_MI(test_data: Amax.input_t): - data, dim, keep_dims = test_data - pipeline = TosaPipelineMI[Amax.input_t]( - Amax(dim, keep_dims), - data, - Amax.aten_op, - ) + data, dim, keep_dims = test_data() + pipeline = TosaPipelineMI[Amax.input_t](Amax(dim, keep_dims), data, Amax.aten_op) pipeline.run() @common.parametrize("test_data", Amax.test_data) def test_amax_tosa_BI(test_data: Amax.input_t): - data, dim, keep_dims = test_data - pipeline = TosaPipelineBI[Amax.input_t]( - Amax(dim, keep_dims), - data, - Amax.aten_op, - ) + data, dim, keep_dims = test_data() + pipeline = 
TosaPipelineBI[Amax.input_t](Amax(dim, keep_dims), data, Amax.aten_op) pipeline.run() def test_amax_u55_BI_not_delegated(): - data, dim, keep_dims = Amax.test_data["rank_4_all_dim"] + data, dim, keep_dims = Amax.test_data["rank_4_all_dim"]() pipeline = OpNotSupportedPipeline[Amax.input_t]( Amax(dim, keep_dims), data, - "TOSA-0.80+BI+u55", {" executorch_exir_dialects_edge__ops_aten_amax_default": 1}, - ) - pipeline.run() - - -@common.parametrize("test_data", Amax.test_data) -def test_amax_u85_BI(test_data: Amax.input_t): - data, dim, keep_dims = test_data - pipeline = EthosU85PipelineBI[Amax.input_t]( - Amax(dim, keep_dims), - data, - Amax.aten_op, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -116,50 +98,43 @@ def test_amax_u85_BI(test_data: Amax.input_t): @common.parametrize("test_data", Amax.test_data, fvp_xfails) -@common.SkipIfNoCorstone320 -def test_amax_u85_BI_on_fvp(test_data: Amax.input_t): - data, dim, keep_dims = test_data +@common.XfailIfNoCorstone320 +def test_amax_u85_BI(test_data: Amax.input_t): + data, dim, keep_dims = test_data() pipeline = EthosU85PipelineBI[Amax.input_t]( - Amax(dim, keep_dims), data, Amax.aten_op, run_on_fvp=True + Amax(dim, keep_dims), + data, + Amax.aten_op, + run_on_fvp=True, ) pipeline.run() @common.parametrize("test_data", Max.test_data) -def test_max_to_amax_MI(test_data: Max.input_t): - data, dim = test_data - pipeline = TosaPipelineMI[Max.input_t]( - Max(dim), - data, - "torch.ops.aten.max", - ) +def test_max_dim_tosa_MI_to_amax(test_data: Max.input_t): + data, dim = test_data() + pipeline = TosaPipelineMI[Max.input_t](Max(dim), data, "torch.ops.aten.max") pipeline.run() @common.parametrize("test_data", Max.test_data) -def test_max_to_amax_BI(test_data: Max.input_t): - data, dim = test_data +def test_max_dim_tosa_BI_to_amax(test_data: Max.input_t): + data, dim = test_data() module = Max(dim) - pipeline = TosaPipelineBI[Max.input_t]( - module, - data, - "torch.ops.aten.amax", - ) + pipeline = TosaPipelineBI[Max.input_t](module, data, "torch.ops.aten.amax") pipeline.run() @pytest.mark.xfail(reason="MLETORCH-718 : Quantization of indices in arm_quantizer") -def test_max_index_not_delegated_BI(): - data, dim = Max.test_data["rank_4_dim_3"] +def test_max_dim_tosa_BI_not_delegated(): + data, dim = Max.test_data()["rank_4_dim_3"]() pipeline = OpNotSupportedPipeline[Max.input_t]( - MaxWithIndex(dim), data, "TOSA-0.80+BI", {} + MaxWithIndex(dim), data, {}, quantize=True ) pipeline.run() -def test_max_index_not_delegated_MI(): - data, dim = Max.test_data["rank_4_dim_3"] - pipeline = OpNotSupportedPipeline[Max.input_t]( - MaxWithIndex(dim), data, "TOSA-0.80+MI", {} - ) +def test_max_dim_tosa_MI_not_delegated(): + data, dim = Max.test_data["rank_4_dim_3"]() + pipeline = OpNotSupportedPipeline[Max.input_t](MaxWithIndex(dim), data, {}) pipeline.run() diff --git a/backends/arm/test/ops/test_amin.py b/backends/arm/test/ops/test_amin.py index 092ed472bce..d83a5ee8839 100644 --- a/backends/arm/test/ops/test_amin.py +++ b/backends/arm/test/ops/test_amin.py @@ -31,11 +31,11 @@ def forward(self, x): return torch.amin(x, self.dim, self.keep_dims) test_data: Dict[str, input_t] = { - "rank_1_dim_0": ((torch.rand([10]),), 0, False), - "rank_2_dim_1_keep_dims": ((torch.rand([2, 2]),), (1,), True), - "rank_4_all_dim": ((torch.rand([1, 2, 5, 5]),), (0, 1, 2, 3), False), - "rank_4_0,3_keep_dims": ((torch.rand([1, 2, 2, 2]),), (0, 3), True), - "rank_4_mult_batches": ((torch.rand([2, 2, 2, 2]),), (0), True), + "rank_1_dim_0": lambda: ((torch.rand([10]),), 0, False), 
+ "rank_2_dim_1_keep_dims": lambda: ((torch.rand([2, 2]),), (1,), True), + "rank_4_all_dim": lambda: ((torch.rand([1, 2, 5, 5]),), (0, 1, 2, 3), False), + "rank_4_0,3_keep_dims": lambda: ((torch.rand([1, 2, 2, 2]),), (0, 3), True), + "rank_4_mult_batches": lambda: ((torch.rand([2, 2, 2, 2]),), (0), True), } @@ -52,10 +52,10 @@ def forward(self, x): return x[0] test_data: Dict[str, input_t] = { - "rank_1_dim_0": ((torch.rand([10]),), 0), - "rank_2_dim_1": ((torch.rand([2, 2]),), 1), - "rank_4_dim_2": ((torch.rand([2, 2, 2, 2]),), 2), - "rank_4_dim_3": ((torch.rand([2, 2, 2, 2]),), 3), + "rank_1_dim_0": lambda: ((torch.rand([10]),), 0), + "rank_2_dim_1": lambda: ((torch.rand([2, 2]),), 1), + "rank_4_dim_2": lambda: ((torch.rand([2, 2, 2, 2]),), 2), + "rank_4_dim_3": lambda: ((torch.rand([2, 2, 2, 2]),), 3), } @@ -71,7 +71,7 @@ def forward(self, x): @common.parametrize("test_data", Amin.test_data) def test_amin_tosa_MI(test_data: Amin.input_t): - data, dim, keep_dims = test_data + data, dim, keep_dims = test_data() pipeline = TosaPipelineMI[Amin.input_t]( Amin(dim, keep_dims), data, @@ -82,7 +82,7 @@ def test_amin_tosa_MI(test_data: Amin.input_t): @common.parametrize("test_data", Amin.test_data) def test_amin_tosa_BI(test_data: Amin.input_t): - data, dim, keep_dims = test_data + data, dim, keep_dims = test_data() pipeline = TosaPipelineBI[Amin.input_t]( Amin(dim, keep_dims), data, @@ -92,23 +92,13 @@ def test_amin_tosa_BI(test_data: Amin.input_t): def test_amin_u55_BI_not_delegated(): - data, dim, keep_dims = Amin.test_data["rank_4_all_dim"] + data, dim, keep_dims = Amin.test_data["rank_4_all_dim"]() pipeline = OpNotSupportedPipeline[Amin.input_t]( Amin(dim, keep_dims), data, - "TOSA-0.80+BI+u55", {" executorch_exir_dialects_edge__ops_aten_amin_default": 1}, - ) - pipeline.run() - - -@common.parametrize("test_data", Amin.test_data) -def test_amin_u85_BI(test_data: Amin.input_t): - data, dim, keep_dims = test_data - pipeline = EthosU85PipelineBI[Amin.input_t]( - Amin(dim, keep_dims), - data, - Amin.aten_op, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -117,50 +107,46 @@ def test_amin_u85_BI(test_data: Amin.input_t): @common.parametrize("test_data", Amin.test_data, fvp_xfails) -@common.SkipIfNoCorstone320 -def test_amin_u85_BI_on_fvp(test_data: Amin.input_t): - data, dim, keep_dims = test_data +@common.XfailIfNoCorstone320 +def test_amin_u85_BI(test_data: Amin.input_t): + data, dim, keep_dims = test_data() pipeline = EthosU85PipelineBI[Amin.input_t]( - Amin(dim, keep_dims), data, Amin.aten_op, run_on_fvp=True + Amin(dim, keep_dims), + data, + Amin.aten_op, + run_on_fvp=True, ) pipeline.run() @common.parametrize("test_data", Min.test_data) -def test_min_to_amin_MI(test_data: Min.input_t): - data, dim = test_data - pipeline = TosaPipelineMI[Min.input_t]( - Min(dim), - data, - "torch.ops.aten.min", - ) +def test_min_dim_tosa_MI_to_amin(test_data: Min.input_t): + data, dim = test_data() + pipeline = TosaPipelineMI[Min.input_t](Min(dim), data, "torch.ops.aten.min") pipeline.run() @common.parametrize("test_data", Min.test_data) -def test_min_to_amin_BI(test_data: Min.input_t): - data, dim = test_data +def test_min_dim_tosa_BI_to_amin(test_data: Min.input_t): + data, dim = test_data() module = Min(dim) - pipeline = TosaPipelineBI[Min.input_t]( - module, - data, - "torch.ops.aten.amin", - ) + pipeline = TosaPipelineBI[Min.input_t](module, data, "torch.ops.aten.amin") pipeline.run() @pytest.mark.xfail(reason="MLETORCH-718 : Quantization of indices in arm_quantizer") -def 
test_max_index_not_delegated_BI(): - data, dim = Min.test_data["rank_4_dim_3"] +def test_min_dim_tosa_BI_not_delegated(): + data, dim = Min.test_data["rank_4_dim_3"]() pipeline = OpNotSupportedPipeline[Min.input_t]( - MinWithIndex(dim), data, "TOSA-0.80+BI", {} + MinWithIndex(dim), + data, + {}, + quantize=True, ) pipeline.run() -def test_max_index_not_delegated_MI(): - data, dim = Min.test_data["rank_4_dim_3"] - pipeline = OpNotSupportedPipeline[Min.input_t]( - MinWithIndex(dim), data, "TOSA-0.80+MI", {} - ) +def test_min_dim_tosa_MI_not_delegated(): + data, dim = Min.test_data["rank_4_dim_3"]() + pipeline = OpNotSupportedPipeline[Min.input_t](MinWithIndex(dim), data, {}) pipeline.run() diff --git a/backends/arm/test/ops/test_any.py b/backends/arm/test/ops/test_any.py index b5de87061ea..6ddef1ad0b5 100644 --- a/backends/arm/test/ops/test_any.py +++ b/backends/arm/test/ops/test_any.py @@ -45,90 +45,94 @@ def forward(self, x: torch.Tensor): test_input: dict[input_t1] = { - "rank1": (torch.tensor([True, False, False], dtype=torch.bool), 0, True), - "rank1_squeeze": (torch.tensor([True, False, False], dtype=torch.bool), -1, False), - "rank2": ( + "rank1": lambda: (torch.tensor([True, False, False], dtype=torch.bool), 0, True), + "rank1_squeeze": lambda: ( + torch.tensor([True, False, False], dtype=torch.bool), + -1, + False, + ), + "rank2": lambda: ( torch.randint(0, 2, (2, 3), dtype=torch.bool), 0, True, ), - "rank2_squeeze": ( + "rank2_squeeze": lambda: ( torch.randint(0, 2, (2, 3), dtype=torch.bool), 0, False, ), - "rank2_dims": ( + "rank2_dims": lambda: ( torch.randint(0, 2, (2, 3), dtype=torch.bool), [0, 1], True, ), - "rank2_dims_squeeze": ( + "rank2_dims_squeeze": lambda: ( torch.randint(0, 2, (2, 3), dtype=torch.bool), [-2, 1], False, ), - "rank3_dims_squeeze": ( + "rank3_dims_squeeze": lambda: ( torch.randint(0, 2, (6, 8, 10), dtype=torch.bool), [1, 2], False, ), - "rank4": ( + "rank4": lambda: ( torch.randint(0, 2, (1, 6, 8, 10), dtype=torch.bool), 1, True, ), - "rank4_squeeze": ( + "rank4_squeeze": lambda: ( torch.randint(0, 2, (1, 6, 8, 10), dtype=torch.bool), 1, False, ), - "rank4_dims": ( + "rank4_dims": lambda: ( torch.randint(0, 2, (1, 6, 8, 10), dtype=torch.bool), [0, 2], True, ), - "rank4_dims_squeeze": ( + "rank4_dims_squeeze": lambda: ( torch.randint(0, 2, (1, 6, 8, 10), dtype=torch.bool), [1, -1], False, ), - "rank1_reduce_all": (torch.tensor([True, False, False], dtype=torch.bool),), - "rank2_reduce_all": (torch.randint(0, 2, (2, 3), dtype=torch.bool),), - "rank3_reduce_all": (torch.randint(0, 2, (6, 8, 10), dtype=torch.bool),), - "rank4_reduce_all": (torch.randint(0, 2, (1, 6, 8, 10), dtype=torch.bool),), + "rank1_reduce_all": lambda: (torch.tensor([True, False, False], dtype=torch.bool),), + "rank2_reduce_all": lambda: (torch.randint(0, 2, (2, 3), dtype=torch.bool),), + "rank3_reduce_all": lambda: (torch.randint(0, 2, (6, 8, 10), dtype=torch.bool),), + "rank4_reduce_all": lambda: (torch.randint(0, 2, (1, 6, 8, 10), dtype=torch.bool),), } test_data = { - "any_rank1": (AnyDim(), test_input["rank1"]), - "any_rank1_squeeze": (AnyDim(), test_input["rank1_squeeze"]), - "any_rank2": (AnyDim(), test_input["rank2"]), - "any_rank2_squeeze": (AnyDim(), test_input["rank2_squeeze"]), - "any_rank2_dims": (AnyDims(), test_input["rank2_dims"]), - "any_rank2_dims_squeeze": (AnyDims(), test_input["rank2_dims_squeeze"]), - "any_rank3_dims_squeeze": (AnyDims(), test_input["rank3_dims_squeeze"]), - "any_rank4": (AnyDim(), test_input["rank4"]), - "any_rank4_squeeze": (AnyDim(), 
test_input["rank4_squeeze"]), - "any_rank4_dims": (AnyDims(), test_input["rank4_dims"]), - "any_rank4_dims_squeeze": (AnyDims(), test_input["rank4_dims_squeeze"]), - "any_rank1_reduce_all": (AnyReduceAll(), test_input["rank1_reduce_all"]), - "any_rank2_reduce_all": (AnyReduceAll(), test_input["rank2_reduce_all"]), - "any_rank3_reduce_all": (AnyReduceAll(), test_input["rank3_reduce_all"]), - "any_rank4_reduce_all": (AnyReduceAll(), test_input["rank4_reduce_all"]), + "any_rank1": lambda: (AnyDim(), test_input["rank1"]), + "any_rank1_squeeze": lambda: (AnyDim(), test_input["rank1_squeeze"]), + "any_rank2": lambda: (AnyDim(), test_input["rank2"]), + "any_rank2_squeeze": lambda: (AnyDim(), test_input["rank2_squeeze"]), + "any_rank2_dims": lambda: (AnyDims(), test_input["rank2_dims"]), + "any_rank2_dims_squeeze": lambda: (AnyDims(), test_input["rank2_dims_squeeze"]), + "any_rank3_dims_squeeze": lambda: (AnyDims(), test_input["rank3_dims_squeeze"]), + "any_rank4": lambda: (AnyDim(), test_input["rank4"]), + "any_rank4_squeeze": lambda: (AnyDim(), test_input["rank4_squeeze"]), + "any_rank4_dims": lambda: (AnyDims(), test_input["rank4_dims"]), + "any_rank4_dims_squeeze": lambda: (AnyDims(), test_input["rank4_dims_squeeze"]), + "any_rank1_reduce_all": lambda: (AnyReduceAll(), test_input["rank1_reduce_all"]), + "any_rank2_reduce_all": lambda: (AnyReduceAll(), test_input["rank2_reduce_all"]), + "any_rank3_reduce_all": lambda: (AnyReduceAll(), test_input["rank3_reduce_all"]), + "any_rank4_reduce_all": lambda: (AnyReduceAll(), test_input["rank4_reduce_all"]), } @common.parametrize("test_data", test_data) def test_any_tosa_MI(test_data: input_t1): - op, test_input = test_data - pipeline = TosaPipelineMI[input_t1](op, test_input, op.aten_op, op.exir_op) + op, test_input = test_data() + pipeline = TosaPipelineMI[input_t1](op, test_input(), op.aten_op, op.exir_op) pipeline.run() @common.parametrize("test_data", test_data) def test_any_tosa_BI(test_data: input_t1): - op, test_input = test_data - pipeline = TosaPipelineBI[input_t1](op, test_input, op.aten_op, op.exir_op) + op, test_input = test_data() + pipeline = TosaPipelineBI[input_t1](op, test_input(), op.aten_op, op.exir_op) pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") pipeline.run() @@ -137,9 +141,13 @@ def test_any_tosa_BI(test_data: input_t1): @common.parametrize("test_data", test_data) def test_any_u55_BI(test_data: input_t1): # Tests that we don't delegate these ops since they are not supported on U55. 
- op, test_input = test_data + op, test_input = test_data() pipeline = OpNotSupportedPipeline[input_t1]( - op, test_input, "TOSA-0.80+BI+u55", {op.exir_op: 1} + op, + test_input(), + {op.exir_op: 1}, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -148,9 +156,13 @@ def test_any_u55_BI(test_data: input_t1): @pytest.mark.xfail(reason="MLETORCH-706: Support ScalarType::Bool in EthosUBackend.") @common.XfailIfNoCorstone320 def test_any_u85_BI(test_data: input_t1): - op, test_input = test_data + op, test_input = test_data() pipeline = EthosU85PipelineBI[input_t1]( - op, test_input, op.aten_op, op.exir_op, run_on_fvp=True + op, + test_input(), + op.aten_op, + op.exir_op, + run_on_fvp=True, ) pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") diff --git a/backends/arm/test/ops/test_arange.py b/backends/arm/test/ops/test_arange.py index 124f3ee597e..cb5f329a7f9 100644 --- a/backends/arm/test/ops/test_arange.py +++ b/backends/arm/test/ops/test_arange.py @@ -54,16 +54,22 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: def test_arange_start_step_tosa_MI(test_data: test_data_t): input_data, init_data = test_data pipeline = TosaPipelineMI[input_t]( - ArangeAdd(*init_data), input_data(), ArangeAdd.aten_op, ArangeAdd.exir_op + ArangeAdd(*init_data), + input_data(), + ArangeAdd.aten_op, + ArangeAdd.exir_op, ) pipeline.run() @common.parametrize("test_data", ArangeAdd.test_data_dtypes) -def test_arange_start_step_dtypes_tosa_MI(test_data: test_data_t): +def test_arange_start_step_tosa_MI_dtypes(test_data: test_data_t): input_data, init_data = test_data pipeline = TosaPipelineMI[input_t]( - ArangeAdd(*init_data), input_data(), ArangeAdd.aten_op, ArangeAdd.exir_op + ArangeAdd(*init_data), + input_data(), + ArangeAdd.aten_op, + ArangeAdd.exir_op, ) pipeline.run() @@ -72,27 +78,34 @@ def test_arange_start_step_dtypes_tosa_MI(test_data: test_data_t): def test_arange_start_step_tosa_BI(test_data: test_data_t): input_data, init_data = test_data pipeline = TosaPipelineBI[input_t]( - ArangeAdd(*init_data), input_data(), ArangeAdd.aten_op, ArangeAdd.exir_op + ArangeAdd(*init_data), + input_data(), + ArangeAdd.aten_op, + ArangeAdd.exir_op, ) pipeline.pop_stage("check.quant_nodes") pipeline.run() @common.parametrize("test_data", ArangeAdd.test_data) -def test_arange_start_step_tosa_u55(test_data: test_data_t): +def test_arange_start_step_u55_BI(test_data: test_data_t): input_data, init_data = test_data pipeline = EthosU55PipelineBI[input_t]( - ArangeAdd(*init_data), input_data(), ArangeAdd.aten_op + ArangeAdd(*init_data), + input_data(), + ArangeAdd.aten_op, ) pipeline.pop_stage("check.quant_nodes") pipeline.run() @common.parametrize("test_data", ArangeAdd.test_data) -def test_arange_start_step_tosa_u85(test_data: test_data_t): +def test_arange_start_step_u85_BI(test_data: test_data_t): input_data, init_data = test_data pipeline = EthosU85PipelineBI[input_t]( - ArangeAdd(*init_data), input_data(), ArangeAdd.aten_op + ArangeAdd(*init_data), + input_data(), + ArangeAdd.aten_op, ) pipeline.pop_stage("check.quant_nodes") pipeline.run() @@ -120,7 +133,10 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: def test_linspace_tosa_MI(test_data): input_data, init_data = test_data pipeline = TosaPipelineMI[input_t]( - LinspaceAdd(*init_data), input_data(), LinspaceAdd.aten_op, LinspaceAdd.exir_op + LinspaceAdd(*init_data), + input_data(), + LinspaceAdd.aten_op, + LinspaceAdd.exir_op, ) pipeline.run() @@ -129,7 +145,10 @@ def test_linspace_tosa_MI(test_data): def 
test_linspace_tosa_BI(test_data: test_data_t): input_data, init_data = test_data pipeline = TosaPipelineBI[input_t]( - LinspaceAdd(*init_data), input_data(), LinspaceAdd.aten_op, LinspaceAdd.exir_op + LinspaceAdd(*init_data), + input_data(), + LinspaceAdd.aten_op, + LinspaceAdd.exir_op, ) pipeline.pop_stage("check.quant_nodes") pipeline.run() diff --git a/backends/arm/test/ops/test_avg_pool2d.py b/backends/arm/test/ops/test_avg_pool2d.py index c48595aec7f..65c1830b9b2 100644 --- a/backends/arm/test/ops/test_avg_pool2d.py +++ b/backends/arm/test/ops/test_avg_pool2d.py @@ -1,6 +1,5 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. -# # Copyright 2024-2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the @@ -9,8 +8,6 @@ from typing import Tuple -import pytest - import torch from executorch.backends.arm.test import common, conftest @@ -23,9 +20,9 @@ TosaPipelineMI, ) - aten_op = "torch.ops.aten.avg_pool2d.default" exir_op = "executorch_exir_dialects_edge__ops_aten_avg_pool2d_default" + input_t = Tuple[torch.Tensor] @@ -46,19 +43,19 @@ def forward(self, x): test_modules = { - "zeros": (AvgPool2d(4, 2, 0), (torch.zeros(1, 16, 50, 32),)), - "ones": (AvgPool2d(4, 2, 0), (torch.ones(1, 16, 50, 32),)), - "rand": (AvgPool2d(4, 2, 0), (torch.rand(1, 16, 50, 32),)), - "randn": (AvgPool2d(4, 2, 0), (torch.randn(1, 16, 50, 32),)), - "kernel_3x3_stride_1_pad_1": ( + "zeros": lambda: (AvgPool2d(4, 2, 0), (torch.zeros(1, 16, 50, 32),)), + "ones": lambda: (AvgPool2d(4, 2, 0), (torch.ones(1, 16, 50, 32),)), + "rand": lambda: (AvgPool2d(4, 2, 0), (torch.rand(1, 16, 50, 32),)), + "randn": lambda: (AvgPool2d(4, 2, 0), (torch.randn(1, 16, 50, 32),)), + "kernel_3x3_stride_1_pad_1": lambda: ( AvgPool2d((3, 3), (1, 1), 1), (torch.rand(1, 16, 50, 32),), ), - "kernel_3x2_stride_1x2_pad_1x0": ( + "kernel_3x2_stride_1x2_pad_1x0": lambda: ( AvgPool2d((3, 2), (1, 2), (1, 0)), (torch.rand(1, 16, 50, 32),), ), - "kernel_4x6_stride_1x2_pad_2x3": ( + "kernel_4x6_stride_1x2_pad_2x3": lambda: ( AvgPool2d((4, 6), (1, 2), (2, 3)), (torch.rand(1, 16, 50, 32),), ), @@ -66,9 +63,8 @@ def forward(self, x): @common.parametrize("test_module", test_modules) -@pytest.mark.tosa_ref_model -def test_avgpool2d_tosa_MI(test_module): - model, input_tensor = test_module +def test_avg_pool2d_tosa_MI(test_module): + model, input_tensor = test_module() pipeline = TosaPipelineMI[input_t]( model, @@ -83,9 +79,8 @@ def test_avgpool2d_tosa_MI(test_module): @common.parametrize("test_module", test_modules) -@pytest.mark.tosa_ref_model -def test_avgpool2d_tosa_BI(test_module): - model, input_tensor = test_module +def test_avg_pool2d_tosa_BI(test_module): + model, input_tensor = test_module() pipeline = TosaPipelineBI[input_t]( model, @@ -101,41 +96,9 @@ def test_avgpool2d_tosa_BI(test_module): @common.parametrize("test_module", test_modules) -def test_avgpool2d_u55_BI(test_module): - model, input_tensor = test_module - - pipeline = EthosU55PipelineBI[input_t]( - model, - input_tensor, - aten_op, - exir_op, - run_on_fvp=False, - symmetric_io_quantization=True, - ) - - pipeline.run() - - -@common.parametrize("test_module", test_modules) -def test_avgpool2d_u85_BI(test_module): - model, input_tensor = test_module - - pipeline = EthosU85PipelineBI[input_t]( - model, - input_tensor, - aten_op, - exir_op, - run_on_fvp=False, - symmetric_io_quantization=True, - ) - - pipeline.run() - - -@common.parametrize("test_module", test_modules) -@common.SkipIfNoCorstone300 -def 
test_avgpool2d_u55_BI_on_fvp(test_module): - model, input_tensor = test_module +@common.XfailIfNoCorstone300 +def test_avg_pool2d_u55_BI(test_module): + model, input_tensor = test_module() pipeline = EthosU55PipelineBI[input_t]( model, @@ -150,9 +113,9 @@ def test_avgpool2d_u55_BI_on_fvp(test_module): @common.parametrize("test_module", test_modules) -@common.SkipIfNoCorstone320 -def test_avgpool2d_u85_BI_on_fvp(test_module): - model, input_tensor = test_module +@common.XfailIfNoCorstone320 +def test_avg_pool2d_u85_BI(test_module): + model, input_tensor = test_module() pipeline = EthosU85PipelineBI[input_t]( model, @@ -168,14 +131,20 @@ def test_avgpool2d_u85_BI_on_fvp(test_module): reject_modules = { - "kernel_1x1_stride_1_pad_0": (AvgPool2d(1, 1, 0), torch.rand(2, 5, 5, 5)), - "kernel_2x9_stride_1_pad_1": (AvgPool2d((2, 9), 1, 1), torch.rand(1, 16, 5, 32)), - "kernel_1x4_stride_0_pad_0": (AvgPool2d(1, 4, 0), torch.rand(1, 10, 10, 10)), - "kernel_1x257_stride_1_pad_0_large": ( + "kernel_1x1_stride_1_pad_0": lambda: (AvgPool2d(1, 1, 0), torch.rand(2, 5, 5, 5)), + "kernel_2x9_stride_1_pad_1": lambda: ( + AvgPool2d((2, 9), 1, 1), + torch.rand(1, 16, 5, 32), + ), + "kernel_1x4_stride_0_pad_0": lambda: ( + AvgPool2d(1, 4, 0), + torch.rand(1, 10, 10, 10), + ), + "kernel_1x257_stride_1_pad_0_large": lambda: ( AvgPool2d((1, 257), 1, 0), torch.rand(1, 16, 5, 300), ), - "kernel_800x90_stride_1_pad_0_extreme": ( + "kernel_800x90_stride_1_pad_0_extreme": lambda: ( AvgPool2d((800, 90), 1, 0), torch.rand(1, 16, 850, 100), ), @@ -183,15 +152,15 @@ def test_avgpool2d_u85_BI_on_fvp(test_module): @common.parametrize("reject_module", reject_modules) -def test_reject_avgpool2d(reject_module): +def test_avg_pool2d_tosa_BI_not_delegated(reject_module): - model, test_data = reject_module + model, test_data = reject_module() pipeline = OpNotSupportedPipeline[input_t]( module=model, test_data=(test_data,), - tosa_version="TOSA-0.80+BI", non_delegated_ops={}, n_expected_delegates=0, + quantize=True, ) pipeline.run() diff --git a/backends/arm/test/ops/test_batch_norm.py b/backends/arm/test/ops/test_batch_norm.py index 980ab28df64..5134353c671 100644 --- a/backends/arm/test/ops/test_batch_norm.py +++ b/backends/arm/test/ops/test_batch_norm.py @@ -5,20 +5,25 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
-import unittest from typing import Tuple +import pytest + import torch from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from parameterized import parameterized +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +input_t1 = Tuple[torch.Tensor] # Input x -test_data_suite = [ +test_data_suite = { # (test_name, test_data, [num_features, affine, track_running_stats, weight, bias, running_mean, running_var,] ) - ( - "zeros_affineT_runStatsT_default_weight_bias_mean_var", + "zeros_affineT_runStatsT_default_weight_bias_mean_var": lambda: ( torch.zeros(1, 32, 112, 112), [ 32, @@ -26,8 +31,7 @@ True, ], ), - ( - "zeros_affineF_runStatsT_default_weight_bias_mean_var", + "zeros_affineF_runStatsT_default_weight_bias_mean_var": lambda: ( torch.zeros(1, 32, 112, 112), [ 32, @@ -35,8 +39,7 @@ True, ], ), - ( - "zeros_affineT_runStatsT_rand_weight_bias_mean_var", + "zeros_affineT_runStatsT_rand_weight_bias_mean_var": lambda: ( torch.zeros(1, 32, 112, 112), [ 32, @@ -48,8 +51,7 @@ torch.rand(32), ], ), - ( - "zeros_affineF_runStatsT_rand_weight_bias_mean_var", + "zeros_affineF_runStatsT_rand_weight_bias_mean_var": lambda: ( torch.zeros(1, 32, 112, 112), [ 32, @@ -61,8 +63,7 @@ torch.rand(32), ], ), - ( - "ones_affineT_runStatsT_default_weight_bias_mean_var", + "ones_affineT_runStatsT_default_weight_bias_mean_var": lambda: ( torch.ones(1, 32, 112, 112), [ 32, @@ -70,8 +71,7 @@ True, ], ), - ( - "ones_affineF_runStatsT_default_weight_bias_mean_var", + "ones_affineF_runStatsT_default_weight_bias_mean_var": lambda: ( torch.ones(1, 32, 112, 112), [ 32, @@ -79,8 +79,7 @@ True, ], ), - ( - "ones_affineT_runStatsT_rand_weight_bias_mean_var", + "ones_affineT_runStatsT_rand_weight_bias_mean_var": lambda: ( torch.ones(1, 32, 112, 112), [ 32, @@ -92,8 +91,7 @@ torch.rand(32), ], ), - ( - "ones_affineF_runStatsT_rand_weight_bias_mean_var", + "ones_affineF_runStatsT_rand_weight_bias_mean_var": lambda: ( torch.ones(1, 32, 112, 112), [ 32, @@ -105,8 +103,7 @@ torch.rand(32), ], ), - ( - "rand_affineT_runStatsT_default_weight_bias_mean_var", + "rand_affineT_runStatsT_default_weight_bias_mean_var": lambda: ( torch.rand(1, 32, 112, 112), [ 32, @@ -114,8 +111,7 @@ True, ], ), - ( - "rand_affineF_runStatsT_default_weight_bias_mean_var", + "rand_affineF_runStatsT_default_weight_bias_mean_var": lambda: ( torch.rand(1, 32, 112, 112), [ 32, @@ -123,8 +119,7 @@ True, ], ), - ( - "rand_affineT_runStatsT_rand_weight_bias_mean_var", + "rand_affineT_runStatsT_rand_weight_bias_mean_var": lambda: ( torch.rand(1, 32, 112, 112), [ 32, @@ -136,8 +131,7 @@ torch.rand(32), ], ), - ( - "rand_affineF_runStatsT_rand_weight_bias_mean_var", + "rand_affineF_runStatsT_rand_weight_bias_mean_var": lambda: ( torch.rand(1, 32, 112, 112), [ 32, @@ -149,8 +143,7 @@ torch.rand(32), ], ), - ( - "randn_affineT_runStatsT_default_weight_bias_mean_var", + "randn_affineT_runStatsT_default_weight_bias_mean_var": lambda: ( torch.randn(1, 32, 112, 112), [ 32, @@ -158,8 +151,7 @@ True, ], ), - ( - "randn_affineF_runStatsT_default_weight_bias_mean_var", + "randn_affineF_runStatsT_default_weight_bias_mean_var": lambda: ( torch.randn(1, 32, 112, 112), [ 32, @@ -167,8 +159,7 @@ True, ], ), - ( - "randn_affineT_runStatsT_rand_weight_bias_mean_var", + "randn_affineT_runStatsT_rand_weight_bias_mean_var": lambda: ( torch.randn(1, 32, 112, 112), [ 32, @@ -180,8 +171,7 @@ torch.rand(32), ], ), - ( - 
"randn_affineF_runStatsT_rand_weight_bias_mean_var", + "randn_affineF_runStatsT_rand_weight_bias_mean_var": lambda: ( torch.randn(1, 32, 112, 112), [ 32, @@ -194,100 +184,81 @@ ], ), # Test some different sizes - ( - "size_3_4_5_6_affineT_runStatsT_rand_weight_bias_mean_var", + "size_3_4_5_6_affineT_runStatsT_rand_weight_bias_mean_var": lambda: ( torch.rand(3, 4, 5, 6), [4, True, True, torch.rand(4), torch.rand(4), torch.rand(4), torch.rand(4)], ), - ( - "size_3_4_5_6_affineF_runStatsT_rand_weight_bias_mean_var", + "size_3_4_5_6_affineF_runStatsT_rand_weight_bias_mean_var": lambda: ( torch.rand(3, 4, 5, 6), [4, True, True, torch.rand(4), torch.rand(4), torch.rand(4), torch.rand(4)], ), - ( - "size_1_3_254_254_affineT_runStatsT_rand_weight_bias_mean_var", + "size_1_3_254_254_affineT_runStatsT_rand_weight_bias_mean_var": lambda: ( torch.rand(1, 3, 254, 254), [3, True, True, torch.rand(3), torch.rand(3), torch.rand(3), torch.rand(3)], ), - ( - "size_1_3_254_254_affineF_runStatsT_rand_weight_bias_mean_var", + "size_1_3_254_254_affineF_runStatsT_rand_weight_bias_mean_var": lambda: ( torch.rand(1, 3, 254, 254), [3, True, True, torch.rand(3), torch.rand(3), torch.rand(3), torch.rand(3)], ), # Test combination of weight and bias - ( - "check_weight_bias_affineT_runStatsT_none_none", + "check_weight_bias_affineT_runStatsT_none_none": lambda: ( torch.rand(1, 32, 112, 112), [32, True, True, None, None], ), - ( - "check_weight_bias_affineF_runStatsT_none_none", + "check_weight_bias_affineF_runStatsT_none_none": lambda: ( torch.rand(1, 32, 112, 112), [32, False, True, None, None], ), - ( - "check_weight_bias_affineT_runStatsT_weight_none", + "check_weight_bias_affineT_runStatsT_weight_none": lambda: ( torch.rand(1, 32, 112, 112), [32, True, True, torch.rand(32)], ), - ( - "check_weight_bias_affineF_runStatsT_weight_none", + "check_weight_bias_affineF_runStatsT_weight_none": lambda: ( torch.rand(1, 32, 112, 112), [32, False, True, torch.rand(32)], ), - ( - "check_weight_bias_affineT_runStatsT_none_bias", + "check_weight_bias_affineT_runStatsT_none_bias": lambda: ( torch.rand(1, 32, 112, 112), [32, True, True, None, torch.rand(32)], ), - ( - "check_weight_bias_affineF_runStatsT_none_bias", + "check_weight_bias_affineF_runStatsT_none_bias": lambda: ( torch.rand(1, 32, 112, 112), [32, False, True, None, torch.rand(32)], ), - ( - "check_weight_bias_affineT_runStatsT_weight_bias", + "check_weight_bias_affineT_runStatsT_weight_bias": lambda: ( torch.rand(1, 32, 112, 112), [32, True, True, torch.rand(32), torch.rand(32)], ), - ( - "check_weight_bias_affineF_runStatsT_weight_bias", + "check_weight_bias_affineF_runStatsT_weight_bias": lambda: ( torch.rand(1, 32, 112, 112), [32, False, True, torch.rand(32), torch.rand(32)], ), # Test combination of running_mean and running_var - ( - "check_mean_var_affineT_runStatsT_none_none", + "check_mean_var_affineT_runStatsT_none_none": lambda: ( torch.randn(1, 32, 112, 112), [32, True, True, torch.rand(32), torch.rand(32), None, None], ), - ( - "check_mean_var_affineF_runStatsT_none_none", + "check_mean_var_affineF_runStatsT_none_none": lambda: ( torch.randn(1, 32, 112, 112), [32, False, True, torch.rand(32), torch.rand(32), None, None], ), - ( - "check_mean_var_affineT_runStatsT_mean_none", + "check_mean_var_affineT_runStatsT_mean_none": lambda: ( torch.randn(1, 32, 112, 112), [32, True, True, torch.rand(32), torch.rand(32), torch.rand(32), None], ), - ( - "check_mean_var_affineF_runStatsT_mean_none", + "check_mean_var_affineF_runStatsT_mean_none": lambda: ( torch.randn(1, 
32, 112, 112), [32, False, True, torch.rand(32), torch.rand(32), torch.rand(32), None], ), - ( - "check_mean_var_affineT_runStatsT_none_var", + "check_mean_var_affineT_runStatsT_none_var": lambda: ( torch.randn(1, 32, 112, 112), [32, True, True, torch.rand(32), torch.rand(32), None, torch.rand(32)], ), - ( - "check_mean_var_affineF_runStatsT_none_var", + "check_mean_var_affineF_runStatsT_none_var": lambda: ( torch.randn(1, 32, 112, 112), [32, False, True, torch.rand(32), torch.rand(32), None, torch.rand(32)], ), - ( - "check_mean_var_affineT_runStatsT_mean_var", + "check_mean_var_affineT_runStatsT_mean_var": lambda: ( torch.randn(1, 32, 112, 112), [ 32, @@ -299,8 +270,7 @@ torch.rand(32), ], ), - ( - "check_mean_var_affineF_runStatsT_mean_var", + "check_mean_var_affineF_runStatsT_mean_var": lambda: ( torch.randn(1, 32, 112, 112), [ 32, @@ -312,12 +282,11 @@ torch.rand(32), ], ), -] +} -test_no_stats_data_suite = [ +test_no_stats_data_suite = { # (test_name, test_data, [num_features, affine, track_running_stats, weight, bias, running_mean, running_var, ] ) - ( - "zeros_affineT_runStatsF_default_weight_bias", + "zeros_affineT_runStatsF_default_weight_bias": lambda: ( torch.zeros(1, 32, 112, 112), [ 32, @@ -325,8 +294,7 @@ False, ], ), - ( - "zeros_affineF_runStatsF_default_weight_bias", + "zeros_affineF_runStatsF_default_weight_bias": lambda: ( torch.zeros(1, 32, 112, 112), [ 32, @@ -334,18 +302,15 @@ False, ], ), - ( - "zeros_affineT_runStatsF_rand_weight_bias", + "zeros_affineT_runStatsF_rand_weight_bias": lambda: ( torch.zeros(1, 32, 112, 112), [32, True, False, torch.rand(32), torch.rand(32)], ), - ( - "zeros_affineF_runStatsF_rand_weight_bias", + "zeros_affineF_runStatsF_rand_weight_bias": lambda: ( torch.zeros(1, 32, 112, 112), [32, False, False, torch.rand(32), torch.rand(32)], ), - ( - "ones_affineT_runStatsF_default_weight_bias", + "ones_affineT_runStatsF_default_weight_bias": lambda: ( torch.ones(1, 32, 112, 112), [ 32, @@ -353,8 +318,7 @@ False, ], ), - ( - "ones_affineF_runStatsF_default_weight_bias", + "ones_affineF_runStatsF_default_weight_bias": lambda: ( torch.ones(1, 32, 112, 112), [ 32, @@ -362,18 +326,15 @@ False, ], ), - ( - "ones_affineT_runStatsF_rand_weight_bias", + "ones_affineT_runStatsF_rand_weight_bias": lambda: ( torch.ones(1, 32, 112, 112), [32, True, False, torch.rand(32), torch.rand(32)], ), - ( - "ones_affineF_runStatsF", + "ones_affineF_runStatsF": lambda: ( torch.ones(1, 32, 112, 112), [32, False, False, torch.rand(32), torch.rand(32)], ), - ( - "rand_affineT_runStatsF_default_weight_bias", + "rand_affineT_runStatsF_default_weight_bias": lambda: ( torch.rand(1, 32, 112, 112), [ 32, @@ -381,8 +342,7 @@ False, ], ), - ( - "rand_affineF_runStatsF_default_weight_bias", + "rand_affineF_runStatsF_default_weight_bias": lambda: ( torch.rand(1, 32, 112, 112), [ 32, @@ -390,18 +350,15 @@ False, ], ), - ( - "rand_affineT_runStatsF_rand_weight_bias", + "rand_affineT_runStatsF_rand_weight_bias": lambda: ( torch.rand(1, 32, 112, 112), [32, True, False, torch.rand(32), torch.rand(32)], ), - ( - "rand_affineF_runStatsF_rand_weight_bias", + "rand_affineF_runStatsF_rand_weight_bias": lambda: ( torch.rand(1, 32, 112, 112), [32, False, False, torch.rand(32), torch.rand(32)], ), - ( - "randn_affineT_runStatsF_default_weight_bias", + "randn_affineT_runStatsF_default_weight_bias": lambda: ( torch.randn(1, 32, 112, 112), [ 32, @@ -409,8 +366,7 @@ False, ], ), - ( - "randn_affineF_runStatsF_default_weight_bias", + "randn_affineF_runStatsF_default_weight_bias": lambda: ( 
torch.randn(1, 32, 112, 112), [ 32, @@ -418,304 +374,148 @@ False, ], ), - ( - "randn_affineT_runStatsF_rand_weight_bias", + "randn_affineT_runStatsF_rand_weight_bias": lambda: ( torch.randn(1, 32, 112, 112), [32, True, False, torch.rand(32), torch.rand(32)], ), - ( - "randn_affineF_runStatsF_rand_weight_bias", + "randn_affineF_runStatsF_rand_weight_bias": lambda: ( torch.randn(1, 32, 112, 112), [32, False, False, torch.rand(32), torch.rand(32)], ), # Test some different sizes - ( - "size_3_4_5_6_affineT_runStatsF_rand_weight_bias_mean_var", + "size_3_4_5_6_affineT_runStatsF_rand_weight_bias_mean_var": lambda: ( torch.rand(3, 4, 5, 6), [4, True, False, torch.rand(4), torch.rand(4)], ), - ( - "size_3_4_5_6_affineF_runStatsF_rand_weight_bias_mean_var", + "size_3_4_5_6_affineF_runStatsF_rand_weight_bias_mean_var": lambda: ( torch.rand(3, 4, 5, 6), [4, True, False, torch.rand(4), torch.rand(4)], ), - ( - "size_1_3_254_254_affineT_runStatsF_rand_weight_bias_mean_var", + "size_1_3_254_254_affineT_runStatsF_rand_weight_bias_mean_var": lambda: ( torch.rand(1, 3, 254, 254), [3, True, False, torch.rand(3), torch.rand(3)], ), - ( - "size_1_3_254_254_affineF_runStatsF_rand_weight_bias_mean_var", + "size_1_3_254_254_affineF_runStatsF_rand_weight_bias_mean_var": lambda: ( torch.rand(1, 3, 254, 254), [3, True, False, torch.rand(3), torch.rand(3)], ), # Test combination of weight and bias - ( - "check_weight_bias_affineT_runStatsF_none_none", + "check_weight_bias_affineT_runStatsF_none_none": lambda: ( torch.rand(1, 32, 112, 112), [32, True, False, None, None], ), - ( - "check_weight_bias_affineF_runStatsF_none_none", + "check_weight_bias_affineF_runStatsF_none_none": lambda: ( torch.rand(1, 32, 112, 112), [32, False, False, None, None], ), - ( - "check_weight_bias_affineT_runStatsF_weight_none", + "check_weight_bias_affineT_runStatsF_weight_none": lambda: ( torch.rand(1, 32, 112, 112), [32, True, False, torch.rand(32)], ), - ( - "check_weight_bias_affineF_runStatsF_weight_none", + "check_weight_bias_affineF_runStatsF_weight_none": lambda: ( torch.rand(1, 32, 112, 112), [32, False, False, torch.rand(32)], ), - ( - "check_weight_bias_affineT_runStatsF_none_bias", + "check_weight_bias_affineT_runStatsF_none_bias": lambda: ( torch.rand(1, 32, 112, 112), [32, True, False, None, torch.rand(32)], ), - ( - "check_weight_bias_affineF_runStatsF_none_bias", + "check_weight_bias_affineF_runStatsF_none_bias": lambda: ( torch.rand(1, 32, 112, 112), [32, False, False, None, torch.rand(32)], ), - ( - "check_weight_bias_affineT_runStatsF_weight_bias", + "check_weight_bias_affineT_runStatsF_weight_bias": lambda: ( torch.rand(1, 32, 112, 112), [32, True, False, torch.rand(32), torch.rand(32)], ), - ( - "check_weight_bias_affineF_runStatsF_weight_bias", + "check_weight_bias_affineF_runStatsF_weight_bias": lambda: ( torch.rand(1, 32, 112, 112), [32, False, False, torch.rand(32), torch.rand(32)], ), -] - - -class TestBatchNorm2d(unittest.TestCase): - """Tests BatchNorm2d.""" +} - class BatchNorm2d(torch.nn.Module): - def __init__( - self, - num_features: int = 32, - affine: bool = False, - track_running_stats: bool = True, - weights: torch.tensor = None, - bias: torch.tensor = None, - running_mean: torch.tensor = None, - running_var: torch.tensor = None, - ): - super().__init__() - self.batch_norm_2d = torch.nn.BatchNorm2d( - num_features, affine=affine, track_running_stats=track_running_stats - ) - if weights is not None: - self.batch_norm_2d.weight = torch.nn.Parameter(weights) - if bias is not None: - 
self.batch_norm_2d.bias = torch.nn.Parameter(bias) - if running_mean is not None: - self.batch_norm_2d.running_mean = running_mean - if running_var is not None: - self.batch_norm_2d.running_var = running_var - def forward(self, x): - return self.batch_norm_2d(x) - - def _test_batchnorm2d_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] +class BatchNorm2d(torch.nn.Module): + def __init__( + self, + num_features: int = 32, + affine: bool = False, + track_running_stats: bool = True, + weights: torch.tensor = None, + bias: torch.tensor = None, + running_mean: torch.tensor = None, + running_var: torch.tensor = None, ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .check_count( - { - "executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default": 1 - } - ) - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .check_not( - [ - "executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default" - ] - ) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) + super().__init__() + self.batch_norm_2d = torch.nn.BatchNorm2d( + num_features, affine=affine, track_running_stats=track_running_stats ) + if weights is not None: + self.batch_norm_2d.weight = torch.nn.Parameter(weights) + if bias is not None: + self.batch_norm_2d.bias = torch.nn.Parameter(bias) + if running_mean is not None: + self.batch_norm_2d.running_mean = running_mean + if running_var is not None: + self.batch_norm_2d.running_var = running_var - def _test_batchnorm2d_no_stats_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten._native_batch_norm_legit.no_stats": 1}) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .check_count( - { - "executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_stats": 1 - } - ) - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .check_not( - [ - "executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_stats" - ] - ) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) + def forward(self, x): + return self.batch_norm_2d(x) - def _test_batchnorm2d_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check_count( - {"torch.ops.aten._native_batch_norm_legit_no_training.default": 1} - ) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .check_count( - { - "executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default": 1 - } - ) - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .check_not( - [ - "executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default" - ] - ) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - def _test_batchnorm2d_u55_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_u55_compile_spec(), - ) - .quantize() - 
.export() - .check_count( - {"torch.ops.aten._native_batch_norm_legit_no_training.default": 1} - ) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .check_count( - { - "executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default": 1 - } - ) - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .check_not( - [ - "executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default" - ] - ) - .to_executorch() - ) +@common.parametrize("test_data", test_data_suite) +def test_native_batch_norm_legit_tosa_MI_no_training(test_data: Tuple): + test_data, model_params = test_data() + pipeline = TosaPipelineMI[input_t1]( + BatchNorm2d(*model_params), + (test_data,), + aten_op=[], + exir_op="executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default", + ) + pipeline.run() - @parameterized.expand(test_data_suite) - def test_native_batch_norm_legit_no_training_tosa_MI( - self, - test_name: str, - test_data: torch.Tensor, - model_params: ( - int - | Tuple[ - int, bool, bool, torch.tensor, torch.tensor, torch.tensor, torch.tensor - ] - ), - ): - self._test_batchnorm2d_tosa_MI_pipeline( - self.BatchNorm2d(*model_params), (test_data,) - ) - # Expected to fail since not inplemented - @parameterized.expand(test_no_stats_data_suite) - @unittest.expectedFailure - def test_native_batch_norm_legit_tosa_MI( - self, - test_name: str, - test_data: torch.Tensor, - model_params: ( - int - | Tuple[ - int, bool, bool, torch.tensor, torch.tensor, torch.tensor, torch.tensor - ] - ), - ): - self._test_batchnorm2d_no_stats_tosa_MI_pipeline( - self.BatchNorm2d(*model_params), (test_data,) - ) +@common.parametrize("test_data", test_no_stats_data_suite) +# Expected to fail since not inplemented +@pytest.mark.skip # Not implemented, skip until it is. +def test_native_batch_norm_legit_tosa_MI(test_data: Tuple): + test_data, model_params = test_data() + pipeline = TosaPipelineMI[input_t1]( + BatchNorm2d(*model_params), + (test_data,), + aten_op=[], + exir_op="executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default", + ) + pipeline.pop_stage("check_count.exir") + pipeline.run() + - # Expected to fail since TOSAQuantizer cannot quantize a BatchNorm layer - # TODO(MLETORCH-100) - @parameterized.expand(test_data_suite) - @unittest.skip( - reason="Expected to fail since TOSAQuantizer (for BI) cannot quantize a BatchNorm layer" +# Expected to fail since TOSAQuantizer cannot quantize a BatchNorm layer +# TODO(MLETORCH-100) +@common.parametrize("test_data", test_data_suite) +@pytest.mark.skip # Not implemented, skip until it is. 
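# A minimal sketch, not taken from the patch itself, of the stage-dropping pattern the
# batch-norm tests above rely on: build the usual MI pipeline, then remove a single
# named stage with pop_stage() before running. The pipeline class and stage name mirror
# the calls visible in this diff; the tiny module and input shape below are assumptions
# made only for illustration.
from typing import Tuple

import torch
from executorch.backends.arm.test.tester.test_pipeline import TosaPipelineMI

example_input_t = Tuple[torch.Tensor]


class ExampleBatchNorm(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.bn = torch.nn.BatchNorm2d(8)

    def forward(self, x):
        return self.bn(x)


def example_pipeline_without_exir_count_check():
    pipeline = TosaPipelineMI[example_input_t](
        ExampleBatchNorm(),
        (torch.randn(1, 8, 16, 16),),
        aten_op=[],
        exir_op="executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default",
    )
    # Drop only the edge-op count check; every other stage still runs.
    pipeline.pop_stage("check_count.exir")
    pipeline.run()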
+def test_native_batch_norm_legit_tosa_BI_no_training(test_data: Tuple): + test_data, model_params = test_data() + pipeline = TosaPipelineBI[input_t1]( + BatchNorm2d(*model_params), + (test_data,), + aten_op="torch.ops.aten._native_batch_norm_legit_no_training.default", + exir_op="executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default", ) - def test_native_batch_norm_legit_no_training_tosa_BI( - self, - test_name: str, - test_data: torch.Tensor, - model_params: ( - int - | Tuple[ - int, bool, bool, torch.tensor, torch.tensor, torch.tensor, torch.tensor - ] - ), - ): - self._test_batchnorm2d_tosa_BI_pipeline( - self.BatchNorm2d(*model_params), (test_data,) - ) + pipeline.run() + - # Expected to fail since EthosUQuantizer (TOSAQuantizer (BI)) cannot quantize a BatchNorm layer - # TODO(MLETORCH-100) - @parameterized.expand(test_data_suite) - @unittest.skip( - reason="Expected to fail since EthosUQuantizer cannot quantize a BatchNorm layer" +# Expected to fail since EthosUQuantizer (TOSAQuantizer (BI)) cannot quantize a BatchNorm layer +# TODO(MLETORCH-100) +@common.parametrize("test_data", test_data_suite) +@pytest.mark.skip # Not implemented, skip until it is. +def test_native_batch_norm_legit_u55_BI_no_training(test_data: Tuple): + test_data, model_params = test_data() + pipeline = EthosU55PipelineBI[input_t1]( + BatchNorm2d(*model_params), + test_data, + aten_ops="torch.ops.aten._native_batch_norm_legit_no_training.default", + exir_ops="executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default", + run_on_fvp=True, ) - @unittest.expectedFailure - def test_native_batch_norm_legit_no_training_u55_BI( - self, - test_name: str, - test_data: torch.Tensor, - model_params: ( - int - | Tuple[ - int, bool, bool, torch.tensor, torch.tensor, torch.tensor, torch.tensor - ] - ), - ): - self._test_batchnorm2d_u55_BI_pipeline( - self.BatchNorm2d(*model_params), (test_data,) - ) + pipeline.run() diff --git a/backends/arm/test/ops/test_bitwise.py b/backends/arm/test/ops/test_bitwise.py index 412701b17da..8be8ba35b4e 100644 --- a/backends/arm/test/ops/test_bitwise.py +++ b/backends/arm/test/ops/test_bitwise.py @@ -22,19 +22,19 @@ class BitwiseBinary(torch.nn.Module): test_data: dict[input_t2] = { - "zeros": ( + "zeros": lambda: ( torch.zeros(1, 10, 10, 10, dtype=torch.int32), torch.zeros(1, 10, 10, 10, dtype=torch.int32), ), - "ones": ( + "ones": lambda: ( torch.ones(10, 10, 10, dtype=torch.int8), torch.ones(10, 10, 10, dtype=torch.int8), ), - "rand_rank2": ( + "rand_rank2": lambda: ( torch.randint(-128, 127, (10, 10), dtype=torch.int8), torch.randint(-128, 127, (10, 10), dtype=torch.int8), ), - "rand_rank4": ( + "rand_rank4": lambda: ( torch.randint(-128, -127, (1, 10, 10, 10), dtype=torch.int8), torch.randint(-128, 127, (1, 10, 10, 10), dtype=torch.int8), ), @@ -67,13 +67,17 @@ def forward(self, tensor1: torch.Tensor, tensor2: torch.Tensor): @common.parametrize("test_data", And().test_data) def test_bitwise_and_tensor_tosa_MI(test_data: input_t2): - pipeline = TosaPipelineMI[input_t2](And(), test_data, And().aten_op, And().exir_op) + pipeline = TosaPipelineMI[input_t2]( + And(), test_data(), And().aten_op, And().exir_op + ) pipeline.run() @common.parametrize("test_data", And().test_data) def test_bitwise_and_tensor_tosa_BI(test_data: input_t2): - pipeline = TosaPipelineBI[input_t2](And(), test_data, And().aten_op, And().exir_op) + pipeline = TosaPipelineBI[input_t2]( + And(), test_data(), And().aten_op, And().exir_op + ) 
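# A minimal sketch (assumed op strings flagged below) of the BI-pipeline pattern used
# for the bitwise tests in this file: test data is stored behind a zero-argument lambda
# and materialised with test_data(), and the quantization stages are popped because the
# inputs are already integer tensors. Pipeline class and stage names mirror this diff.
import torch
from executorch.backends.arm.test.tester.test_pipeline import TosaPipelineBI

example_data = lambda: (  # noqa: E731 - mirrors the lambda style used in this file
    torch.randint(-128, 127, (4, 4), dtype=torch.int8),
    torch.randint(-128, 127, (4, 4), dtype=torch.int8),
)


class ExampleAnd(torch.nn.Module):
    def forward(self, a: torch.Tensor, b: torch.Tensor):
        return a.bitwise_and(b)


def example_bitwise_bi_pipeline():
    pipeline = TosaPipelineBI(
        ExampleAnd(),
        example_data(),  # call the lambda to get fresh tensors
        "torch.ops.aten.bitwise_and.Tensor",  # assumed aten op name
        "executorch_exir_dialects_edge__ops_aten_bitwise_and_Tensor",  # assumed edge op name
    )
    pipeline.pop_stage("quantize")  # integer inputs need no quantization
    pipeline.pop_stage("check.quant_nodes")  # so there are no quant nodes to check for
    pipeline.run()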
pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") pipeline.run() @@ -83,7 +87,11 @@ def test_bitwise_and_tensor_tosa_BI(test_data: input_t2): def test_bitwise_and_tensor_u55_BI(test_data: input_t2): # Tests that we don't delegate these ops since they are not supported on U55. pipeline = OpNotSupportedPipeline[input_t2]( - And(), test_data, "TOSA-0.80+BI+u55", {And().exir_op: 1} + And(), + test_data(), + {And().exir_op: 1}, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -93,7 +101,7 @@ def test_bitwise_and_tensor_u55_BI(test_data: input_t2): @common.XfailIfNoCorstone320 def test_bitwise_and_tensor_u85_BI(test_data: input_t2): pipeline = EthosU85PipelineBI[input_t2]( - And(), test_data, And().aten_op, And().exir_op, run_on_fvp=True + And(), test_data(), And().aten_op, And().exir_op, run_on_fvp=True ) pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") @@ -102,13 +110,17 @@ def test_bitwise_and_tensor_u85_BI(test_data: input_t2): @common.parametrize("test_data", Xor().test_data) def test_bitwise_xor_tensor_tosa_MI(test_data: input_t2): - pipeline = TosaPipelineMI[input_t2](Xor(), test_data, Xor().aten_op, Xor().exir_op) + pipeline = TosaPipelineMI[input_t2]( + Xor(), test_data(), Xor().aten_op, Xor().exir_op + ) pipeline.run() @common.parametrize("test_data", Xor().test_data) def test_bitwise_xor_tensor_tosa_BI(test_data: input_t2): - pipeline = TosaPipelineBI[input_t2](Xor(), test_data, Xor().aten_op, Xor().exir_op) + pipeline = TosaPipelineBI[input_t2]( + Xor(), test_data(), Xor().aten_op, Xor().exir_op + ) pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") pipeline.run() @@ -118,7 +130,11 @@ def test_bitwise_xor_tensor_tosa_BI(test_data: input_t2): def test_bitwise_xor_tensor_u55_BI(test_data: input_t2): # Tests that we don't delegate these ops since they are not supported on U55. pipeline = OpNotSupportedPipeline[input_t2]( - Xor(), test_data, "TOSA-0.80+BI+u55", {Xor().exir_op: 1} + Xor(), + test_data(), + {Xor().exir_op: 1}, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -128,7 +144,7 @@ def test_bitwise_xor_tensor_u55_BI(test_data: input_t2): @common.XfailIfNoCorstone320 def test_bitwise_xor_tensor_u85_BI(test_data: input_t2): pipeline = EthosU85PipelineBI[input_t2]( - Xor(), test_data, Xor().aten_op, Xor().exir_op, run_on_fvp=True + Xor(), test_data(), Xor().aten_op, Xor().exir_op, run_on_fvp=True ) pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") @@ -137,13 +153,13 @@ def test_bitwise_xor_tensor_u85_BI(test_data: input_t2): @common.parametrize("test_data", Or().test_data) def test_bitwise_or_tensor_tosa_MI(test_data: input_t2): - pipeline = TosaPipelineMI[input_t2](Or(), test_data, Or().aten_op, Or().exir_op) + pipeline = TosaPipelineMI[input_t2](Or(), test_data(), Or().aten_op, Or().exir_op) pipeline.run() @common.parametrize("test_data", Or().test_data) def test_bitwise_or_tensor_tosa_BI(test_data: input_t2): - pipeline = TosaPipelineBI[input_t2](Or(), test_data, Or().aten_op, Or().exir_op) + pipeline = TosaPipelineBI[input_t2](Or(), test_data(), Or().aten_op, Or().exir_op) pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") pipeline.run() @@ -153,7 +169,11 @@ def test_bitwise_or_tensor_tosa_BI(test_data: input_t2): def test_bitwise_or_tensor_u55_BI(test_data: input_t2): # Tests that we don't delegate these ops since they are not supported on U55. 
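# Sketch of the non-delegation check applied below: OpNotSupportedPipeline asserts that
# the listed edge ops stay outside the delegate, with the quantize=True / u55_subset=True
# keywords introduced in this diff replacing the old "TOSA-0.80+BI+u55" spec string.
# The concrete op name in the dict is an assumption for illustration only.
import torch
from executorch.backends.arm.test.tester.test_pipeline import OpNotSupportedPipeline


class ExampleOr(torch.nn.Module):
    def forward(self, a: torch.Tensor, b: torch.Tensor):
        return a.bitwise_or(b)


def example_or_not_delegated_on_u55():
    data = (
        torch.zeros(2, 2, dtype=torch.int32),
        torch.ones(2, 2, dtype=torch.int32),
    )
    # The dict maps each edge op expected to remain un-delegated to its expected count.
    OpNotSupportedPipeline(
        ExampleOr(),
        data,
        {"executorch_exir_dialects_edge__ops_aten_bitwise_or_Tensor": 1},  # assumed name
        quantize=True,
        u55_subset=True,
    ).run()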
pipeline = OpNotSupportedPipeline[input_t2]( - Or(), test_data, "TOSA-0.80+BI+u55", {Or().exir_op: 1} + Or(), + test_data(), + {Or().exir_op: 1}, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -163,7 +183,11 @@ def test_bitwise_or_tensor_u55_BI(test_data: input_t2): @common.XfailIfNoCorstone320 def test_bitwise_or_tensor_u85_BI(test_data: input_t2): pipeline = EthosU85PipelineBI[input_t2]( - Or(), test_data, Or().aten_op, Or().exir_op, run_on_fvp=True + Or(), + test_data(), + Or().aten_op, + Or().exir_op, + run_on_fvp=True, ) pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") diff --git a/backends/arm/test/ops/test_bmm.py b/backends/arm/test/ops/test_bmm.py index 375e77cb9b0..bd2c9338275 100644 --- a/backends/arm/test/ops/test_bmm.py +++ b/backends/arm/test/ops/test_bmm.py @@ -1,165 +1,162 @@ # Copyright 2024-2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest -from typing import Callable, Tuple +from typing import Tuple import pytest import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - - -class TestBMM(unittest.TestCase): - """Tests Batch MatMul""" - - class BMM(torch.nn.Module): - test_data_generators = [ - lambda: (torch.rand(2, 1, 1), torch.rand(2, 1, 1)), - lambda: (torch.rand(5, 3, 5), torch.rand(5, 5, 2)), - lambda: (torch.ones(1, 55, 3), torch.ones(1, 3, 44)), - lambda: (10000 * torch.randn(10, 1, 10), torch.randn(10, 10, 5)), - lambda: (-10 * torch.randn(2, 32, 64), 5 + 5 * torch.randn(2, 64, 32)), - ] - - def forward(self, x, y): - return torch.bmm(x, y) - - class BMMSingleInput(torch.nn.Module): - test_data_generators = [ - lambda: (torch.rand(20, 3, 3),), - lambda: (torch.rand(2, 128, 128),), - lambda: (10000 * torch.randn(4, 25, 25),), - lambda: (5 + 5 * torch.randn(3, 64, 64),), - ] - - def forward(self, x): - return torch.bmm(x, x) - - def _test_bmm_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor, ...] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .check_count({"executorch_exir_dialects_edge__ops_aten_bmm_default": 1}) - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_bmm_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_bmm_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor, ...] 
- ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .check_count({"executorch_exir_dialects_edge__ops_aten_bmm_default": 1}) - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_bmm_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, qtol=1) - ) - - def _test_bmm_ethosu_BI_pipeline( - self, - module: torch.nn.Module, - compile_spec: CompileSpec, - test_data: Tuple[torch.Tensor, ...], - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.bmm.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(inputs=test_data, qtol=1) - - @parameterized.expand(BMM.test_data_generators) - def test_bmm_tosa_MI(self, test_data_generator: Callable[[], Tuple]): - test_data = test_data_generator() - self._test_bmm_tosa_MI_pipeline(self.BMM(), test_data) - - @parameterized.expand(BMMSingleInput.test_data_generators) - @pytest.mark.flaky # TODO: Investigate flakyness (MLETORCH-534) - def test_bmm_single_input_tosa_MI(self, test_data_generator: Callable[[], Tuple]): - test_data = test_data_generator() - self._test_bmm_tosa_MI_pipeline(self.BMMSingleInput(), test_data) - - @parameterized.expand(BMM.test_data_generators) - def test_bmm_tosa_BI(self, test_data_generator: Callable[[], Tuple]): - test_data = test_data_generator() - self._test_bmm_tosa_BI_pipeline(self.BMM(), test_data) - - @parameterized.expand(BMMSingleInput.test_data_generators) - @pytest.mark.flaky # TODO: Investigate flakyness (MLETORCH-534) - def test_bmm_single_input_tosa_BI(self, test_data_generator: Callable[[], Tuple]): - test_data = test_data_generator() - self._test_bmm_tosa_BI_pipeline(self.BMMSingleInput(), test_data) - - @parameterized.expand(BMM.test_data_generators) - @pytest.mark.corstone_fvp - def test_bmm_u55_BI(self, test_data_generator: Callable[[], Tuple]): - test_data = test_data_generator() - self._test_bmm_ethosu_BI_pipeline( - self.BMM(), common.get_u55_compile_spec(), test_data - ) - - @parameterized.expand(BMM.test_data_generators) - @pytest.mark.corstone_fvp - def test_bmm_u85_BI(self, test_data_generator: Callable[[], Tuple]): - test_data = test_data_generator() - self._test_bmm_ethosu_BI_pipeline( - self.BMM(), common.get_u85_compile_spec(), test_data - ) - - # Expected to fail on FVP as TOSA.MATMUL is not supported on U55 - @parameterized.expand(BMMSingleInput.test_data_generators) - @pytest.mark.corstone_fvp - def test_bmm_single_input_u55_BI(self, test_data_generator: Callable[[], Tuple]): - test_data = test_data_generator() - self._test_bmm_ethosu_BI_pipeline( - self.BMMSingleInput(), common.get_u55_compile_spec(), test_data - ) - - @parameterized.expand(BMMSingleInput.test_data_generators) - @pytest.mark.corstone_fvp - def test_bmm_single_input_u85_BI(self, test_data_generator: Callable[[], Tuple]): - test_data = test_data_generator() - self._test_bmm_ethosu_BI_pipeline( - self.BMMSingleInput(), common.get_u85_compile_spec(), test_data - ) + +from executorch.backends.arm.test import common + +from 
executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +aten_op_bmm = "torch.ops.aten.bmm.default" +exir_op_bmm = "executorch_exir_dialects_edge__ops_aten_bmm_default" + +aten_op_mm = "torch.ops.aten.matmul.default" +exir_op_mm = "executorch_exir_dialects_edge__ops_aten_matmul_default" + +input_t1 = Tuple[torch.Tensor, torch.Tensor] # Input x + + +class BMM(torch.nn.Module): + test_data_generators = { + "rand_same": lambda: (torch.rand(2, 1, 1), torch.rand(2, 1, 1)), + "rand_diff": lambda: (torch.rand(5, 3, 5), torch.rand(5, 5, 2)), + "rand_ones": lambda: (torch.ones(1, 55, 3), torch.ones(1, 3, 44)), + "rand_big": lambda: (10000 * torch.randn(10, 1, 10), torch.randn(10, 10, 5)), + "rand_neg": lambda: ( + -10 * torch.randn(2, 32, 64), + 5 + 5 * torch.randn(2, 64, 32), + ), + } + + def forward(self, x, y): + return torch.bmm(x, y) + + +class MatMul(torch.nn.Module): + test_data_generators = { + "rand_3d": lambda: (torch.rand(2, 3, 5), torch.rand(2, 5, 2)), + "rand_4d": lambda: (torch.rand(1, 2, 3, 5), torch.rand(1, 2, 5, 2)), + } + + def forward(self, x, y): + return torch.matmul(x, y) + + +class BMMSingleInput(torch.nn.Module): + test_data_generators = { + "rand_3d_1": lambda: (torch.rand(20, 3, 3),), + "rand_3d_2": lambda: (torch.rand(2, 128, 128),), + "rand_big_1": lambda: (10000 * torch.randn(4, 25, 25),), + "rand_big_2": lambda: (5 + 5 * torch.randn(3, 64, 64),), + } + + def forward(self, x): + return torch.bmm(x, x) + + +@common.parametrize("test_data", BMM.test_data_generators) +def test_bmm_tosa_MI(test_data: input_t1): + pipeline = TosaPipelineMI[input_t1](BMM(), test_data(), aten_op_bmm, exir_op_bmm) + pipeline.run() + + +@pytest.mark.flaky(reruns=5) # TODO: Investigate flakyness (MLETORCH-534) +@common.parametrize("test_data", BMMSingleInput.test_data_generators) +def test_bmm_tosa_MI_single_input(test_data: input_t1): + pipeline = TosaPipelineMI[input_t1]( + BMMSingleInput(), test_data(), aten_op_bmm, exir_op_bmm + ) + pipeline.run() + + +@common.parametrize("test_data", MatMul.test_data_generators) +def test_mm_tosa_MI(test_data: input_t1): + pipeline = TosaPipelineMI[input_t1](MatMul(), test_data(), aten_op_mm, exir_op_mm) + pipeline.run() + + +@common.parametrize("test_data", MatMul.test_data_generators) +def test_mm_tosa_BI(test_data: input_t1): + pipeline = TosaPipelineBI[input_t1](MatMul(), test_data(), aten_op_mm, exir_op_mm) + pipeline.run() + + +@pytest.mark.flaky(reruns=5) # TODO: Investigate flakyness (MLETORCH-534) +@common.parametrize("test_data", BMM.test_data_generators) +def test_bmm_tosa_BI(test_data: input_t1): + pipeline = TosaPipelineBI[input_t1](BMM(), test_data(), aten_op_bmm, exir_op_bmm) + pipeline.run() + + +@pytest.mark.flaky(reruns=5) # TODO: Investigate flakyness (MLETORCH-534) +@common.parametrize("test_data", BMMSingleInput.test_data_generators) +def test_bmm_tosa_BI_single_input(test_data: input_t1): + pipeline = TosaPipelineBI[input_t1]( + BMMSingleInput(), test_data(), aten_op_bmm, exir_op_bmm + ) + pipeline.change_args("run_method_and_compare_outputs", qtol=1) + pipeline.run() + + +@common.parametrize("test_data", BMM.test_data_generators) +@common.XfailIfNoCorstone300 +def test_bmm_u55_BI(test_data: input_t1): + pipeline = EthosU55PipelineBI[input_t1]( + BMM(), + test_data(), + aten_op_bmm, + exir_op_bmm, + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", BMM.test_data_generators) +@common.XfailIfNoCorstone320 +def 
test_bmm_u85_BI(test_data: input_t1): + pipeline = EthosU85PipelineBI[input_t1]( + BMM(), + test_data(), + aten_op_bmm, + exir_op_bmm, + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", BMMSingleInput.test_data_generators) +@common.XfailIfNoCorstone300 +def test_bmm_u55_BI_single_input(test_data: input_t1): + pipeline = EthosU55PipelineBI[input_t1]( + BMMSingleInput(), + test_data(), + aten_op_bmm, + exir_op_bmm, + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", BMMSingleInput.test_data_generators) +@common.XfailIfNoCorstone320 +def test_bmm_u85_BI_single_input(test_data: input_t1): + pipeline = EthosU85PipelineBI[input_t1]( + BMMSingleInput(), + test_data(), + aten_op_bmm, + exir_op_bmm, + run_on_fvp=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_cat.py b/backends/arm/test/ops/test_cat.py index 63423b9e993..d5ebd6fe569 100644 --- a/backends/arm/test/ops/test_cat.py +++ b/backends/arm/test/ops/test_cat.py @@ -1,172 +1,138 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. -# Copyright 2024-2025 Arm Limited and/or its affiliates. # All rights reserved. +# Copyright 2024-2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest from typing import Tuple -import pytest - import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized +from executorch.backends.arm.test import common + +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) +input_t1 = Tuple[torch.Tensor] # Input x -class TestCat(unittest.TestCase): +aten_op = "torch.ops.aten.cat.default" +exir_op = "executorch_exir_dialects_edge__ops_aten_cat_default" - class Cat(torch.nn.Module): - test_parameters = [ - ((torch.ones(1), torch.ones(1)), 0), - ((torch.ones(1, 2), torch.randn(1, 5), torch.randn(1, 1)), 1), + +class Cat(torch.nn.Module): + test_parameters = { + "cat_ones_two_tensors": lambda: ((torch.ones(1), torch.ones(1)), 0), + "cat_ones_and_rand_three_tensors": lambda: ( + (torch.ones(1, 2), torch.randn(1, 5), torch.randn(1, 1)), + 1, + ), + "cat_ones_and_rand_four_tensors": lambda: ( ( - ( - torch.ones(1, 2, 5), - torch.randn(1, 2, 4), - torch.randn(1, 2, 2), - torch.randn(1, 2, 1), - ), - -1, + torch.ones(1, 2, 5), + torch.randn(1, 2, 4), + torch.randn(1, 2, 2), + torch.randn(1, 2, 1), ), - ((torch.randn(1, 2, 4, 4), torch.randn(1, 2, 4, 1)), 3), - ((torch.randn(1, 2, 4, 4), torch.randn(1, 2, 4, 4)), 0), - ((torch.randn(2, 2, 4, 4), torch.randn(2, 2, 4, 1)), 3), + -1, + ), + "cat_rand_two_tensors": lambda: ( + (torch.randn(1, 2, 4, 4), torch.randn(1, 2, 4, 1)), + 3, + ), + "cat_rand_two_tensors_dim_0": lambda: ( + (torch.randn(1, 2, 4, 4), torch.randn(1, 2, 4, 4)), + 0, + ), + "cat_rand_two_tensors_dim_3": lambda: ( + (torch.randn(2, 2, 4, 4), torch.randn(2, 2, 4, 1)), + 3, + ), + "cat_rand_large": lambda: ( ( - ( - 10000 * torch.randn(2, 3, 1, 4), - torch.randn(2, 7, 1, 4), - torch.randn(2, 1, 1, 4), - ), - -3, + 10000 * torch.randn(2, 3, 1, 4), + torch.randn(2, 7, 1, 4), + torch.randn(2, 1, 1, 4), ), - ] - - def __init__(self): - super().__init__() - - def forward(self, t: tuple[torch.Tensor, ...], dim: int) -> torch.Tensor: - return 
torch.cat(t, dim=dim) - - def _test_cat_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[tuple[torch.Tensor, ...], int] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.cat.default": 1}) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_cat_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_cat_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[tuple[torch.Tensor, ...], int] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check_count({"torch.ops.aten.cat.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_cat_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, qtol=1) - ) - - def _test_cat_ethosu_BI_pipeline( - self, - module: torch.nn.Module, - compile_spec: CompileSpec, - test_data: Tuple[tuple[torch.Tensor, ...], int], - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.cat.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_cat_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(inputs=test_data) - - @parameterized.expand(Cat.test_parameters) - def test_cat_tosa_MI(self, operands: tuple[torch.Tensor, ...], dim: int): - test_data = (operands, dim) - self._test_cat_tosa_MI_pipeline(self.Cat(), test_data) - - def test_cat_4d_tosa_MI(self): - square = torch.ones((2, 2, 2, 2)) - for dim in range(-3, 3): - test_data = ((square, square.clone()), dim) - self._test_cat_tosa_MI_pipeline(self.Cat(), test_data) - - @parameterized.expand(Cat.test_parameters) - def test_cat_tosa_BI(self, operands: tuple[torch.Tensor, ...], dim: int): - test_data = (operands, dim) - self._test_cat_tosa_BI_pipeline(self.Cat(), test_data) - - @parameterized.expand(Cat.test_parameters[:-3]) - @pytest.mark.corstone_fvp - def test_cat_u55_BI(self, operands: tuple[torch.Tensor, ...], dim: int): - test_data = (operands, dim) - self._test_cat_ethosu_BI_pipeline( - self.Cat(), common.get_u55_compile_spec(), test_data - ) - - # MLETORCH-630 Cat does not work on FVP with batch>1 - @parameterized.expand(Cat.test_parameters[-3:]) - @pytest.mark.corstone_fvp - @conftest.expectedFailureOnFVP - def test_cat_u55_BI_xfails(self, operands: tuple[torch.Tensor, ...], dim: int): - test_data = (operands, dim) - self._test_cat_ethosu_BI_pipeline( - self.Cat(), common.get_u55_compile_spec(), test_data - ) - - @parameterized.expand(Cat.test_parameters[:-3]) - @pytest.mark.corstone_fvp - def test_cat_u85_BI(self, operands: tuple[torch.Tensor, ...], dim: int): - test_data = (operands, dim) - self._test_cat_ethosu_BI_pipeline( - self.Cat(), common.get_u85_compile_spec(), test_data - ) - - # MLETORCH-630 Cat does not work on FVP with batch>1 - 
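# Sketch of how known-failing cases are expressed after this change: instead of slicing
# the parameter list and decorating separate *_xfails tests, a dict mapping test-case
# name to an xfail reason is passed as the third argument of common.parametrize, exactly
# as the Cat U55/U85 tests below do with x_fails. Shapes and the Example-prefixed helper
# names are assumptions for illustration; the cat op strings are the ones used in this file.
import torch
from executorch.backends.arm.test import common
from executorch.backends.arm.test.tester.test_pipeline import EthosU85PipelineBI

example_params = {
    "batch_1": lambda: ((torch.randn(1, 2, 4, 4), torch.randn(1, 2, 4, 4)), 0),
    "batch_2": lambda: ((torch.randn(2, 2, 4, 4), torch.randn(2, 2, 4, 4)), 0),
}
example_xfails = {
    "batch_2": "MLETORCH-630: Cat does not work on FVP with batch>1",
}


class ExampleCat(torch.nn.Module):
    def forward(self, tensors: tuple, dim: int):
        return torch.cat(tensors, dim=dim)


@common.parametrize("test_data", example_params, example_xfails)
@common.XfailIfNoCorstone320
def example_cat_u85_BI(test_data):
    EthosU85PipelineBI(
        ExampleCat(),
        test_data(),
        "torch.ops.aten.cat.default",
        "executorch_exir_dialects_edge__ops_aten_cat_default",
        run_on_fvp=True,
    ).run()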
@parameterized.expand(Cat.test_parameters[-3:]) - @pytest.mark.corstone_fvp - @conftest.expectedFailureOnFVP - def test_cat_u85_BI_xfails(self, operands: tuple[torch.Tensor, ...], dim: int): - test_data = (operands, dim) - self._test_cat_ethosu_BI_pipeline( - self.Cat(), common.get_u85_compile_spec(), test_data + -3, + ), + } + + def __init__(self): + super().__init__() + + def forward(self, t: tuple[torch.Tensor, ...], dim: int) -> torch.Tensor: + return torch.cat(t, dim=dim) + + +@common.parametrize("test_data", Cat.test_parameters) +def test_cat_tosa_MI(test_data: Tuple): + pipeline = TosaPipelineMI[input_t1]( + Cat(), + test_data(), + aten_op, + exir_op, + ) + pipeline.run() + + +def test_cat_tosa_MI_4d(): + square = torch.ones((2, 2, 2, 2)) + for dim in range(-3, 3): + test_data = ((square, square.clone()), dim) + pipeline = TosaPipelineMI[input_t1]( + Cat(), + test_data, + aten_op, + exir_op, ) + pipeline.run() + + +@common.parametrize("test_data", Cat.test_parameters) +def test_cat_tosa_BI(test_data: Tuple): + pipeline = TosaPipelineBI[input_t1]( + Cat(), + test_data(), + aten_op, + exir_op, + ) + pipeline.run() + + +x_fails = { + "cat_rand_two_tensors_dim_0": "MLETORCH-630: AssertionError: Output 0 does not match reference output.", + "cat_rand_two_tensors_dim_0": "MLETORCH-630: AssertionError: Output 0 does not match reference output.", + "cat_rand_two_tensors_dim_3": "MLETORCH-630: AssertionError: Output 0 does not match reference output.", + "cat_rand_large": "MLETORCH-630: AssertionError: Output 0 does not match reference output.", +} + + +@common.parametrize("test_data", Cat.test_parameters, x_fails) +@common.XfailIfNoCorstone300 +def test_cat_u55_BI(test_data: Tuple): + pipeline = EthosU55PipelineBI[input_t1]( + Cat(), + test_data(), + aten_op, + exir_op, + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", Cat.test_parameters, x_fails) +@common.XfailIfNoCorstone320 +def test_cat_u85_BI(test_data: Tuple): + pipeline = EthosU85PipelineBI[input_t1]( + Cat(), + test_data(), + aten_op, + exir_op, + run_on_fvp=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_clamp.py b/backends/arm/test/ops/test_clamp.py index 368f7967433..0846effa7a6 100644 --- a/backends/arm/test/ops/test_clamp.py +++ b/backends/arm/test/ops/test_clamp.py @@ -17,20 +17,21 @@ TosaPipelineMI, ) - aten_op = "torch.ops.aten.clamp.default" exir_op = "executorch_exir_dialects_edge__ops_aten_clamp_default" + input_t = Tuple[torch.Tensor] + test_data_suite = { # test_name: (test_data, min, max) - "rank_1": (torch.rand(10) * 2, -1.0, 1.0), - "rank_2": (torch.rand(1, 35), 0.5, 0.8), - "rank_3": (torch.ones(1, 10, 10), -1, -1), - "rank_4": (torch.rand(1, 10, 10, 1) * 2, -0.1, 2.0), - "rank_4_mixed_min_max_dtype": (torch.rand(1, 10, 10, 5) + 10, 8.0, 10), - "rank_4_no_min": (torch.rand(1, 10, 10, 1) * 10, None, 5), - "rank_4_no_max": (torch.rand(1, 10, 10, 1) - 3, -3.3, None), + "rank_1": lambda: (torch.rand(10) * 2, -1.0, 1.0), + "rank_2": lambda: (torch.rand(1, 35), 0.5, 0.8), + "rank_3": lambda: (torch.ones(1, 10, 10), -1, -1), + "rank_4": lambda: (torch.rand(1, 10, 10, 1) * 2, -0.1, 2.0), + "rank_4_mixed_min_max_dtype": lambda: (torch.rand(1, 10, 10, 5) + 10, 8.0, 10), + "rank_4_no_min": lambda: (torch.rand(1, 10, 10, 1) * 10, None, 5), + "rank_4_no_max": lambda: (torch.rand(1, 10, 10, 1) - 3, -3.3, None), } @@ -52,7 +53,7 @@ def forward(self, x): @common.parametrize("test_data", test_data_suite) def test_clamp_tosa_MI(test_data): - input_tensor, min_val, max_val = test_data + 
input_tensor, min_val, max_val = test_data() model = Clamp(min_val, max_val) pipeline = TosaPipelineMI[input_t]( @@ -68,7 +69,7 @@ def test_clamp_tosa_MI(test_data): @common.parametrize("test_data", test_data_suite) def test_clamp_tosa_BI(test_data): - input_tensor, min_val, max_val = test_data + input_tensor, min_val, max_val = test_data() model = Clamp(min_val, max_val) pipeline = TosaPipelineBI[input_t]( @@ -84,46 +85,10 @@ def test_clamp_tosa_BI(test_data): @common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone300 def test_clamp_u55_BI(test_data): - input_tensor, min_val, max_val = test_data - model = Clamp(min_val, max_val) - - pipeline = EthosU55PipelineBI[input_t]( - model, - (input_tensor,), - aten_op, - exir_op, - run_on_fvp=False, - symmetric_io_quantization=True, - ) - - pipeline.run() - - -@common.parametrize("test_data", test_data_suite) -def test_clamp_u85_BI(test_data): - - input_tensor, min_val, max_val = test_data - model = Clamp(min_val, max_val) - - pipeline = EthosU85PipelineBI[input_t]( - model, - (input_tensor,), - aten_op, - exir_op, - run_on_fvp=False, - symmetric_io_quantization=True, - ) - - pipeline.run() - - -@common.parametrize("test_data", test_data_suite) -@common.SkipIfNoCorstone300 -def test_clamp_u55_BI_on_fvp(test_data): - - input_tensor, min_val, max_val = test_data + input_tensor, min_val, max_val = test_data() model = Clamp(min_val, max_val) pipeline = EthosU55PipelineBI[input_t]( @@ -140,10 +105,10 @@ def test_clamp_u55_BI_on_fvp(test_data): @common.parametrize("test_data", test_data_suite) -@common.SkipIfNoCorstone320 -def test_clamp_u85_BI_on_fvp(test_data): +@common.XfailIfNoCorstone320 +def test_clamp_u85_BI(test_data): - input_tensor, min_val, max_val = test_data + input_tensor, min_val, max_val = test_data() model = Clamp(min_val, max_val) pipeline = EthosU85PipelineBI[input_t]( diff --git a/backends/arm/test/ops/test_clone.py b/backends/arm/test/ops/test_clone.py index 2aad62ece24..125a705ccb4 100644 --- a/backends/arm/test/ops/test_clone.py +++ b/backends/arm/test/ops/test_clone.py @@ -21,7 +21,6 @@ TosaPipelineMI, ) - aten_op = "torch.ops.aten.clone.default" exir_op = "executorch_exir_dialects_edge__ops_aten_clone_default" @@ -36,13 +35,13 @@ def forward(self, x: torch.Tensor): test_data_suite = { - "ones_1D_10": (torch.ones(10),), - "ones_1D_50": (torch.ones(50),), - "rand_1D_20": (torch.rand(20),), - "rand_2D_10x10": (torch.rand(10, 10),), - "rand_3D_5x5x5": (torch.rand(5, 5, 5),), - "rand_4D_2x3x4x5": (torch.rand(2, 3, 4, 5),), - "large_tensor": (torch.rand(1000),), + "ones_1D_10": lambda: (torch.ones(10),), + "ones_1D_50": lambda: (torch.ones(50),), + "rand_1D_20": lambda: (torch.rand(20),), + "rand_2D_10x10": lambda: (torch.rand(10, 10),), + "rand_3D_5x5x5": lambda: (torch.rand(5, 5, 5),), + "rand_4D_2x3x4x5": lambda: (torch.rand(2, 3, 4, 5),), + "large_tensor": lambda: (torch.rand(1000),), } @@ -51,7 +50,7 @@ def test_clone_tosa_MI(test_data: Tuple[torch.Tensor]): pipeline = TosaPipelineMI[input_t]( Clone(), - test_data, + test_data(), aten_op, exir_op, ) @@ -63,7 +62,7 @@ def test_clone_tosa_MI(test_data: Tuple[torch.Tensor]): def test_clone_tosa_BI(test_data): pipeline = TosaPipelineBI[input_t]( Clone(), - test_data, + test_data(), aten_op, exir_op, symmetric_io_quantization=True, @@ -72,48 +71,14 @@ def test_clone_tosa_BI(test_data): @common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone300 @pytest.mark.xfail( reason="Empty subgraph leads to Vela compilation failure. 
See: https://jira.arm.com/browse/MLBEDSW-10477" ) def test_clone_u55_BI(test_data): pipeline = EthosU55PipelineBI[input_t]( Clone(), - test_data, - aten_op, - exir_op, - run_on_fvp=False, - symmetric_io_quantization=True, - ) - - pipeline.run() - - -@common.parametrize("test_data", test_data_suite) -@pytest.mark.xfail( - reason="Empty subgraph leads to Vela compilation failure. See: https://jira.arm.com/browse/MLBEDSW-10477" -) -def test_clone_u85_BI(test_data): - pipeline = EthosU85PipelineBI[input_t]( - Clone(), - test_data, - aten_op, - exir_op, - run_on_fvp=False, - symmetric_io_quantization=True, - ) - - pipeline.run() - - -@common.parametrize("test_data", test_data_suite) -@pytest.mark.xfail( - reason="Empty subgraph leads to Vela compilation failure. See: https://jira.arm.com/browse/MLBEDSW-10477" -) -@common.SkipIfNoCorstone300 -def test_clone_u55_BI_on_fvp(test_data): - pipeline = EthosU55PipelineBI[input_t]( - Clone(), - test_data, + test_data(), aten_op, exir_op, run_on_fvp=True, @@ -124,14 +89,14 @@ def test_clone_u55_BI_on_fvp(test_data): @common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone320 @pytest.mark.xfail( reason="Empty subgraph leads to Vela compilation failure. See: https://jira.arm.com/browse/MLBEDSW-10477" ) -@common.SkipIfNoCorstone320 -def test_clone_u85_BI_on_fvp(test_data): +def test_clone_u85_BI(test_data): pipeline = EthosU85PipelineBI[input_t]( Clone(), - test_data, + test_data(), aten_op, exir_op, run_on_fvp=True, diff --git a/backends/arm/test/ops/test_constant_pad_nd.py b/backends/arm/test/ops/test_constant_pad_nd.py index 9a19f6fbf5f..0a81fd0f97d 100644 --- a/backends/arm/test/ops/test_constant_pad_nd.py +++ b/backends/arm/test/ops/test_constant_pad_nd.py @@ -17,19 +17,20 @@ aten_op = "torch.ops.aten.pad.default" exir_op = "executorch_exir_dialects_edge__ops_aten_pad_default" + input_t1 = Tuple[torch.Tensor] # Input x + test_data_suite = { - "4dim_last1dim": (torch.rand(1, 1, 16, 16), (1, 1, 0, 0, 0, 0, 0, 0), 1), - "4dim_last2dim": (torch.rand(1, 1, 16, 16), (1, 0, 1, 0, 0, 0, 0, 0), 2), - "4dim_last3dim": (torch.rand(1, 1, 16, 16), (1, 1, 0, 2, 0, 2, 0, 0), 3), - "4dim_last4dim": (torch.rand(1, 1, 16, 16), (1, 0, 1, 1, 0, 2, 0, 2), 4), - "3dim_last1dim": (torch.rand(1, 1, 16), (1, 1, 0, 0, 0, 0), 1), - "3dim_last2dim": (torch.rand(1, 1, 16), (1, 0, 1, 1, 0, 0), 2), - "3dim_last3dim": (torch.rand(1, 1, 16), (1, 0, 1, 0, 1, 1), 3), - "2dim_last1dim": (torch.rand(1, 1, 16), (1, 1, 0, 0), 1), - "2dim_last2dim": (torch.rand(1, 1, 16), (1, 0, 1, 1), 2), + "4dim_last1dim": lambda: (torch.rand(1, 1, 16, 16), (1, 1, 0, 0, 0, 0, 0, 0), 1), + "4dim_last2dim": lambda: (torch.rand(1, 1, 16, 16), (1, 0, 1, 0, 0, 0, 0, 0), 2), + "4dim_last3dim": lambda: (torch.rand(1, 1, 16, 16), (1, 1, 0, 2, 0, 2, 0, 0), 3), + "4dim_last4dim": lambda: (torch.rand(1, 1, 16, 16), (1, 0, 1, 1, 0, 2, 0, 2), 4), + "3dim_last1dim": lambda: (torch.rand(1, 1, 16), (1, 1, 0, 0, 0, 0), 1), + "3dim_last2dim": lambda: (torch.rand(1, 1, 16), (1, 0, 1, 1, 0, 0), 2), + "3dim_last3dim": lambda: (torch.rand(1, 1, 16), (1, 0, 1, 0, 1, 1), 3), + "2dim_last1dim": lambda: (torch.rand(1, 1, 16), (1, 1, 0, 0), 1), + "2dim_last2dim": lambda: (torch.rand(1, 1, 16), (1, 0, 1, 1), 2), } -"""Tests pad.""" class ConstantPadND(torch.nn.Module): @@ -53,7 +54,7 @@ def forward(self, x: torch.Tensor): test_data_suite, ) def test_constant_pad_nd_tosa_MI(test_data: Tuple): - test_data, padding, value = test_data + test_data, padding, value = test_data() pipeline = TosaPipelineMI[input_t1]( 
ConstantPadND(padding, value), (test_data,), @@ -65,7 +66,7 @@ def test_constant_pad_nd_tosa_MI(test_data: Tuple): @common.parametrize("test_data", test_data_suite) def test_constant_pad_nd_tosa_BI(test_data: Tuple): - test_data, padding, value = test_data + test_data, padding, value = test_data() pipeline = TosaPipelineBI[input_t1]( ConstantPadND(padding, value), (test_data,), diff --git a/backends/arm/test/ops/test_conv1d.py b/backends/arm/test/ops/test_conv1d.py index a1ba23ac73a..768da4d5c89 100644 --- a/backends/arm/test/ops/test_conv1d.py +++ b/backends/arm/test/ops/test_conv1d.py @@ -250,27 +250,27 @@ def forward(self, x): ) test_modules = { - "2_3x2x40_nobias": conv1d_2_3x2x40_nobias, - "3_1x3x256_st1": conv1d_3_1x3x256_st1, - "3_1x3x12_st2_pd1": conv1d_3_1x3x12_st2_pd1, - "1_1x2x128_st1": conv1d_1_1x2x128_st1, - "2_1x2x14_st2": conv1d_2_1x2x14_st2, - "5_3x2x128_st1": conv1d_5_3x2x128_st1, - "3_1x3x224_st2_pd1": conv1d_3_1x3x224_st2_pd1, - "7_1x3x16_st2_pd1_dl2_needs_adjust_pass": conv1d_7_1x3x16_st2_pd1_dl2, - "7_1x3x15_st1_pd0_dl1_needs_adjust_pass": conv1d_7_1x3x15_st1_pd0_dl1, - "5_1x3x14_st5_pd0_dl1_needs_adjust_pass": conv1d_5_1x3x14_st5_pd0_dl1, - "5_1x3x9_st5_pd0_dl1_needs_adjust_pass": conv1d_5_1x3x9_st5_pd0_dl1, - "two_conv1d_nobias": two_conv1d_nobias, - "two_conv1d": two_conv1d, + "2_3x2x40_nobias": lambda: conv1d_2_3x2x40_nobias, + "3_1x3x256_st1": lambda: conv1d_3_1x3x256_st1, + "3_1x3x12_st2_pd1": lambda: conv1d_3_1x3x12_st2_pd1, + "1_1x2x128_st1": lambda: conv1d_1_1x2x128_st1, + "2_1x2x14_st2": lambda: conv1d_2_1x2x14_st2, + "5_3x2x128_st1": lambda: conv1d_5_3x2x128_st1, + "3_1x3x224_st2_pd1": lambda: conv1d_3_1x3x224_st2_pd1, + "7_1x3x16_st2_pd1_dl2_needs_adjust_pass": lambda: conv1d_7_1x3x16_st2_pd1_dl2, + "7_1x3x15_st1_pd0_dl1_needs_adjust_pass": lambda: conv1d_7_1x3x15_st1_pd0_dl1, + "5_1x3x14_st5_pd0_dl1_needs_adjust_pass": lambda: conv1d_5_1x3x14_st5_pd0_dl1, + "5_1x3x9_st5_pd0_dl1_needs_adjust_pass": lambda: conv1d_5_1x3x9_st5_pd0_dl1, + "two_conv1d_nobias": lambda: two_conv1d_nobias, + "two_conv1d": lambda: two_conv1d, } @common.parametrize("test_module", test_modules) def test_convolution_1d_tosa_MI(test_module): pipeline = TosaPipelineMI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), aten_op, exir_op, ) @@ -280,8 +280,8 @@ def test_convolution_1d_tosa_MI(test_module): @common.parametrize("test_module", test_modules) def test_convolution_1d_tosa_BI(test_module): pipeline = TosaPipelineBI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), aten_op, exir_op, ) @@ -290,35 +290,11 @@ def test_convolution_1d_tosa_BI(test_module): @common.parametrize("test_module", test_modules) +@common.XfailIfNoCorstone300 def test_convolution_1d_u55_BI(test_module): pipeline = EthosU55PipelineBI[input_t]( - test_module, - test_module.get_inputs(), - aten_op, - exir_op, - run_on_fvp=False, - ) - pipeline.run() - - -@common.parametrize("test_module", test_modules) -def test_convolution_1d_u85_BI(test_module): - pipeline = EthosU85PipelineBI[input_t]( - test_module, - test_module.get_inputs(), - aten_op, - exir_op, - run_on_fvp=False, - ) - pipeline.run() - - -@common.parametrize("test_module", test_modules) -@common.SkipIfNoCorstone300 -def test_convolution_1d_u55_BI_on_fvp(test_module): - pipeline = EthosU55PipelineBI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), aten_op, exir_op, run_on_fvp=True, @@ -328,11 +304,11 @@ def 
test_convolution_1d_u55_BI_on_fvp(test_module): @common.parametrize("test_module", test_modules) -@common.SkipIfNoCorstone320 -def test_convolution_1d_u85_BI_on_fvp(test_module): +@common.XfailIfNoCorstone320 +def test_convolution_1d_u85_BI(test_module): pipeline = EthosU85PipelineBI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), aten_op, exir_op, run_on_fvp=True, diff --git a/backends/arm/test/ops/test_conv2d.py b/backends/arm/test/ops/test_conv2d.py index 844eed97638..158c296e4ec 100644 --- a/backends/arm/test/ops/test_conv2d.py +++ b/backends/arm/test/ops/test_conv2d.py @@ -330,24 +330,24 @@ def forward(self, x): # Shenanigan to get a nicer output when test fails. With unittest it looks like: # FAIL: test_convolution_2d_tosa_BI_2_3x3_1x3x12x12_st2_pd1 test_modules = { - "2x2_3x2x40x40_nobias": conv2d_2x2_3x2x40x40_nobias, - "3x3_1x3x256x256_st1": conv2d_3x3_1x3x256x256_st1, - "3x3_1x3x12x12_st2_pd1": conv2d_3x3_1x3x12x12_st2_pd1, - "1x1_1x2x128x128_st1": conv2d_1x1_1x2x128x128_st1, - "2x2_1x1x14x13_st2_needs_adjust_pass": conv2d_2x2_1x1x14x13_st2, - "5x5_1x3x14x15_st3_pd1_needs_adjust_pass": conv2d_5x5_1x3x14x15_st3_pd1, - "7x7_1x3x16x16_st2_pd1_dl2_needs_adjust_pass": conv2d_7x7_1x3x16x16_st2_pd1_dl2, - "7x7_1x3x15x15_st1_pd0_dl1_needs_adjust_pass": conv2d_7x7_1x3x15x15_st1_pd0_dl1, - "5x5_1x3x14x14_st5_pd0_dl1_needs_adjust_pass": conv2d_5x5_1x3x14x14_st5_pd0_dl1, - "5x5_1x3x9x9_st5_pd0_dl1_needs_adjust_pass": conv2d_5x5_1x3x9x9_st5_pd0_dl1, - "3x3_1x3x9x8_st3_pd0_dl1_needs_adjust_pass": conv2d_3x3_1x3x9x8_st3_pd0_dl1, - "3x3_1x3x8x9_st3_pd0_dl1_needs_adjust_pass": conv2d_3x3_1x3x8x9_st3_pd0_dl1, - "3x4_1x3x7x7_st3_pd0_dl1_needs_adjust_pass": conv2d_3x4_1x3x7x7_st3_pd0_dl1, - "4x3_1x3x7x7_st3_pd0_dl1_needs_adjust_pass": conv2d_4x3_1x3x7x7_st3_pd0_dl1, - "5x5_3x2x128x128_st1": conv2d_5x5_3x2x128x128_st1, - "3x3_1x3x224x224_st2_pd1": conv2d_3x3_1x3x224x224_st2_pd1, - "two_conv2d_nobias": two_conv2d_nobias, - "two_conv2d": two_conv2d, + "2x2_3x2x40x40_nobias": lambda: conv2d_2x2_3x2x40x40_nobias, + "3x3_1x3x256x256_st1": lambda: conv2d_3x3_1x3x256x256_st1, + "3x3_1x3x12x12_st2_pd1": lambda: conv2d_3x3_1x3x12x12_st2_pd1, + "1x1_1x2x128x128_st1": lambda: conv2d_1x1_1x2x128x128_st1, + "2x2_1x1x14x13_st2_needs_adjust_pass": lambda: conv2d_2x2_1x1x14x13_st2, + "5x5_1x3x14x15_st3_pd1_needs_adjust_pass": lambda: conv2d_5x5_1x3x14x15_st3_pd1, + "7x7_1x3x16x16_st2_pd1_dl2_needs_adjust_pass": lambda: conv2d_7x7_1x3x16x16_st2_pd1_dl2, + "7x7_1x3x15x15_st1_pd0_dl1_needs_adjust_pass": lambda: conv2d_7x7_1x3x15x15_st1_pd0_dl1, + "5x5_1x3x14x14_st5_pd0_dl1_needs_adjust_pass": lambda: conv2d_5x5_1x3x14x14_st5_pd0_dl1, + "5x5_1x3x9x9_st5_pd0_dl1_needs_adjust_pass": lambda: conv2d_5x5_1x3x9x9_st5_pd0_dl1, + "3x3_1x3x9x8_st3_pd0_dl1_needs_adjust_pass": lambda: conv2d_3x3_1x3x9x8_st3_pd0_dl1, + "3x3_1x3x8x9_st3_pd0_dl1_needs_adjust_pass": lambda: conv2d_3x3_1x3x8x9_st3_pd0_dl1, + "3x4_1x3x7x7_st3_pd0_dl1_needs_adjust_pass": lambda: conv2d_3x4_1x3x7x7_st3_pd0_dl1, + "4x3_1x3x7x7_st3_pd0_dl1_needs_adjust_pass": lambda: conv2d_4x3_1x3x7x7_st3_pd0_dl1, + "5x5_3x2x128x128_st1": lambda: conv2d_5x5_3x2x128x128_st1, + "3x3_1x3x224x224_st2_pd1": lambda: conv2d_3x3_1x3x224x224_st2_pd1, + "two_conv2d_nobias": lambda: two_conv2d_nobias, + "two_conv2d": lambda: two_conv2d, } fvp_xfails = { @@ -360,7 +360,10 @@ def forward(self, x): @common.parametrize("test_module", test_modules) def test_convolution_2d_tosa_MI(test_module): pipeline = TosaPipelineMI[input_t]( - 
test_module, test_module.get_inputs(), aten_op, exir_op + test_module(), + test_module().get_inputs(), + aten_op, + exir_op, ) pipeline.run() @@ -368,48 +371,43 @@ def test_convolution_2d_tosa_MI(test_module): @common.parametrize("test_module", test_modules) def test_convolution_2d_tosa_BI(test_module): pipeline = TosaPipelineBI[input_t]( - test_module, test_module.get_inputs(), aten_op, exir_op + test_module(), + test_module().get_inputs(), + aten_op, + exir_op, ) pipeline.change_args("run_method_and_compare_outputs", qtol=1) pipeline.run() -@common.parametrize("test_module", test_modules) -def test_convolution_2d_u55_BI(test_module): - pipeline = EthosU55PipelineBI[input_t]( - test_module, test_module.get_inputs(), aten_op, exir_op, run_on_fvp=False - ) - pipeline.run() - - -@common.parametrize("test_module", test_modules) -def test_convolution_2d_u85_BI(test_module): - pipeline = EthosU85PipelineBI[input_t]( - test_module, test_module.get_inputs(), aten_op, exir_op, run_on_fvp=False - ) - pipeline.run() - - @common.parametrize("test_module", test_modules, fvp_xfails) -@common.SkipIfNoCorstone300 -def test_convolution_2d_u55_BI_on_fvp(test_module): +@common.XfailIfNoCorstone300 +def test_convolution_2d_u55_BI(test_module): pipeline = EthosU55PipelineBI[input_t]( - test_module, test_module.get_inputs(), aten_op, exir_op, run_on_fvp=True + test_module(), + test_module().get_inputs(), + aten_op, + exir_op, + run_on_fvp=True, ) pipeline.run() @common.parametrize("test_module", test_modules, fvp_xfails) -@common.SkipIfNoCorstone320 -def test_convolution_2d_u85_BI_on_fvp(test_module): +@common.XfailIfNoCorstone320 +def test_convolution_2d_u85_BI(test_module): pipeline = EthosU85PipelineBI[input_t]( - test_module, test_module.get_inputs(), aten_op, exir_op, run_on_fvp=True + test_module(), + test_module().get_inputs(), + aten_op, + exir_op, + run_on_fvp=True, ) pipeline.run() reject_suite = { - "large_stride": Conv2d( + "large_stride": lambda: Conv2d( in_channels=1, out_channels=1, kernel_size=(2, 4), @@ -419,7 +417,7 @@ def test_convolution_2d_u85_BI_on_fvp(test_module): height=14, batches=1, ), - "large_kernel_height": Conv2d( + "large_kernel_height": lambda: Conv2d( in_channels=1, out_channels=1, kernel_size=(2, 65), @@ -429,7 +427,7 @@ def test_convolution_2d_u85_BI_on_fvp(test_module): height=70, batches=1, ), - "large_kernel": Conv2d( + "large_kernel": lambda: Conv2d( in_channels=1, out_channels=1, kernel_size=(70, 60), @@ -443,12 +441,11 @@ def test_convolution_2d_u85_BI_on_fvp(test_module): @common.parametrize("module", reject_suite) -def test_reject_convolution_2d_u55_BI( - module: Conv2d, -): +def test_convolution_2d_u55_BI_not_delegated(module: Conv2d): OpNotSupportedPipeline( - module, - module.get_inputs(), - "TOSA-0.80+BI+u55", + module(), + module().get_inputs(), {"executorch_exir_dialects_edge__ops_aten_convolution_default": 1}, + quantize=True, + u55_subset=True, ).run() diff --git a/backends/arm/test/ops/test_conv3d.py b/backends/arm/test/ops/test_conv3d.py index 22f7e9e7f54..c7bb7c55887 100644 --- a/backends/arm/test/ops/test_conv3d.py +++ b/backends/arm/test/ops/test_conv3d.py @@ -305,22 +305,22 @@ def forward(self, x): ) test_modules = { - "2x2_3x2x40x40_nobias": conv3d_2x2_3x2x40x40_nobias, - "3x3_1x3x256x256_st1": conv3d_3x3_1x3x256x256_st1, - "3x3_1x3x12x12_st2_pd1": conv3d_3x3_1x3x12x12_st2_pd1, - "1x1_1x2x128x128_st1": conv3d_1x1_1x2x128x128_st1, - "2x2_1x1x14x13_st2_needs_adjust_pass": conv3d_2x2_1x1x14x13_st2, - "5x5_1x3x14x15_st3_pd1_needs_adjust_pass": 
conv3d_5x5_1x3x14x15_st3_pd1, - "7x7_1x3x16x16_st2_pd1_dl2_needs_adjust_pass": conv3d_7x7_1x3x16x16_st2_pd1_dl2, - "7x7_1x3x15x15_st1_pd0_dl1_needs_adjust_pass": conv3d_7x7_1x3x15x15_st1_pd0_dl1, - "5x5_1x3x14x14_st5_pd0_dl1_needs_adjust_pass": conv3d_5x5_1x3x14x14_st5_pd0_dl1, - "5x5_1x3x9x9_st5_pd0_dl1_needs_adjust_pass": conv3d_5x5_1x3x9x9_st5_pd0_dl1, - "3x3_1x3x9x8_st3_pd0_dl1_needs_adjust_pass": conv3d_3x3_1x3x9x8_st3_pd0_dl1, - "3x3_1x3x8x9_st3_pd0_dl1_needs_adjust_pass": conv3d_3x3_1x3x8x9_st3_pd0_dl1, - "3x4_1x3x7x7_st3_pd0_dl1_needs_adjust_pass": conv3d_3x4_1x3x7x7_st3_pd0_dl1, - "4x3_1x3x7x7_st3_pd0_dl1_needs_adjust_pass": conv3d_4x3_1x3x7x7_st3_pd0_dl1, - "5x5_3x2x128x128_st1": conv3d_5x5_3x2x128x128_st1, - "3x3_1x3x224x224_st2_pd1": conv3d_3x3_1x3x224x224_st2_pd1, + "2x2_3x2x40x40_nobias": lambda: conv3d_2x2_3x2x40x40_nobias, + "3x3_1x3x256x256_st1": lambda: conv3d_3x3_1x3x256x256_st1, + "3x3_1x3x12x12_st2_pd1": lambda: conv3d_3x3_1x3x12x12_st2_pd1, + "1x1_1x2x128x128_st1": lambda: conv3d_1x1_1x2x128x128_st1, + "2x2_1x1x14x13_st2_needs_adjust_pass": lambda: conv3d_2x2_1x1x14x13_st2, + "5x5_1x3x14x15_st3_pd1_needs_adjust_pass": lambda: conv3d_5x5_1x3x14x15_st3_pd1, + "7x7_1x3x16x16_st2_pd1_dl2_needs_adjust_pass": lambda: conv3d_7x7_1x3x16x16_st2_pd1_dl2, + "7x7_1x3x15x15_st1_pd0_dl1_needs_adjust_pass": lambda: conv3d_7x7_1x3x15x15_st1_pd0_dl1, + "5x5_1x3x14x14_st5_pd0_dl1_needs_adjust_pass": lambda: conv3d_5x5_1x3x14x14_st5_pd0_dl1, + "5x5_1x3x9x9_st5_pd0_dl1_needs_adjust_pass": lambda: conv3d_5x5_1x3x9x9_st5_pd0_dl1, + "3x3_1x3x9x8_st3_pd0_dl1_needs_adjust_pass": lambda: conv3d_3x3_1x3x9x8_st3_pd0_dl1, + "3x3_1x3x8x9_st3_pd0_dl1_needs_adjust_pass": lambda: conv3d_3x3_1x3x8x9_st3_pd0_dl1, + "3x4_1x3x7x7_st3_pd0_dl1_needs_adjust_pass": lambda: conv3d_3x4_1x3x7x7_st3_pd0_dl1, + "4x3_1x3x7x7_st3_pd0_dl1_needs_adjust_pass": lambda: conv3d_4x3_1x3x7x7_st3_pd0_dl1, + "5x5_3x2x128x128_st1": lambda: conv3d_5x5_3x2x128x128_st1, + "3x3_1x3x224x224_st2_pd1": lambda: conv3d_3x3_1x3x224x224_st2_pd1, } input_t = Tuple[torch.Tensor] @@ -328,18 +328,18 @@ def forward(self, x): @common.parametrize("test_module", test_modules) @pytest.mark.skip # Not implemented, skip until it is. -def test_convolution_3d_tosa_MI(test_module): +def test_convolution_tosa_MI_3d(test_module): pipeline = TosaPipelineMI[input_t]( - test_module, test_module.get_inputs(), aten_op, exir_op + test_module(), test_module().get_inputs(), aten_op, exir_op ) pipeline.run() @common.parametrize("test_module", test_modules) @pytest.mark.skip # Not implemented, skip until it is. -def test_convolution_3d_tosa_BI(test_module): +def test_convolution_tosa_BI_3d(test_module): pipeline = TosaPipelineBI[input_t]( - test_module, test_module.get_inputs(), aten_op, exir_op + test_module(), test_module().get_inputs(), aten_op, exir_op ) pipeline.change_args("run_method_and_compare_outputs", qtol=1) pipeline.run() @@ -347,24 +347,32 @@ def test_convolution_3d_tosa_BI(test_module): @common.parametrize("test_module", test_modules) @pytest.mark.skip # Not implemented, skip until it is. -def test_convolution_3d_u55_BI(test_module): +def test_convolution_u55_BI_3d(test_module): pipeline = EthosU55PipelineBI[input_t]( - test_module, test_module.get_inputs(), aten_op, exir_op, run_on_fvp=True + test_module(), + test_module().get_inputs(), + aten_op, + exir_op, + run_on_fvp=True, ) pipeline.run() @common.parametrize("test_module", test_modules) @pytest.mark.skip # Not implemented, skip until it is. 
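# Sketch of the lazy module registry pattern these conv tests now share: every entry is a
# zero-argument lambda, so modules and tensors are only constructed when the selected
# test actually runs, and the test body calls test_module() to materialise them. The
# torch.nn.Conv3d module, input shape and skip placement below are assumptions; the edge
# op string is the one used by the reject suite in this file.
from typing import Tuple

import pytest
import torch
from executorch.backends.arm.test import common
from executorch.backends.arm.test.tester.test_pipeline import TosaPipelineMI

example_modules = {
    "tiny_conv3d": lambda: torch.nn.Conv3d(1, 1, kernel_size=2),
}


@common.parametrize("test_module", example_modules)
@pytest.mark.skip  # 3D convolution is not lowered yet, like the tests below.
def example_convolution_tosa_MI_3d(test_module):
    module = test_module()  # build the module only now
    TosaPipelineMI[Tuple[torch.Tensor]](
        module,
        (torch.randn(1, 1, 4, 8, 8),),
        aten_op=[],
        exir_op="executorch_exir_dialects_edge__ops_aten_convolution_default",
    ).run()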
-def test_convolution_3d_u85_BI(test_module): +def test_convolution_u85_BI_3d(test_module): pipeline = EthosU85PipelineBI[input_t]( - test_module, test_module.get_inputs(), aten_op, exir_op, run_on_fvp=True + test_module(), + test_module().get_inputs(), + aten_op, + exir_op, + run_on_fvp=True, ) pipeline.run() reject_suite = { - "large_stride": Conv3d( + "large_stride": lambda: Conv3d( in_channels=1, out_channels=1, kernel_size=(2, 2, 1), @@ -374,7 +382,7 @@ def test_convolution_3d_u85_BI(test_module): height=14, batches=1, ), - "large_kernel_z": Conv3d( + "large_kernel_z": lambda: Conv3d( in_channels=1, out_channels=1, kernel_size=(2, 2, 2), @@ -388,12 +396,11 @@ def test_convolution_3d_u85_BI(test_module): @common.parametrize("module", reject_suite) -def test_reject_convolution_3d_u55_BI( - module: Conv3d, -): +def test_convolution_u55_BI_not_delegated_3d(module: Conv3d): OpNotSupportedPipeline( - module, - module.get_inputs(), - "TOSA-0.80+BI+u55", + module(), + module().get_inputs(), {"executorch_exir_dialects_edge__ops_aten_convolution_default": 1}, + quantize=True, + u55_subset=True, ).run() diff --git a/backends/arm/test/ops/test_conv_combos.py b/backends/arm/test/ops/test_conv_combos.py index 0fb3c2675e9..7f54fa226aa 100644 --- a/backends/arm/test/ops/test_conv_combos.py +++ b/backends/arm/test/ops/test_conv_combos.py @@ -1,20 +1,24 @@ # Copyright 2024-2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest - from typing import Tuple import pytest import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.backend_details import CompileSpec -from parameterized import parameterized +from executorch.backends.arm.test import common + +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +input_t1 = Tuple[torch.Tensor] + from torch.nn.parameter import Parameter @@ -138,13 +142,13 @@ class ComboConvRelu6(torch.nn.Module): "executorch_exir_dialects_edge__ops_aten_hardtanh_default", ] - test_data = [ - (2 * torch.randn(1, 3, 256, 256),), - (0.5 * torch.randn(1, 3, 256, 256),), - (torch.randn(1, 3, 256, 256),), - (-0.5 * torch.randn(1, 3, 256, 256),), - (-2 * torch.randn(1, 3, 256, 256),), - ] + test_data = { + "combo_conv_relu_2_x_4d": lambda: (2 * torch.randn(1, 3, 256, 256),), + "combo_conv_relu_0_5_x_4d": lambda: (0.5 * torch.randn(1, 3, 256, 256),), + "combo_conv_relu_4d": lambda: (torch.randn(1, 3, 256, 256),), + "combo_conv_relu_neg_0_5_x_4d": lambda: (-0.5 * torch.randn(1, 3, 256, 256),), + "combo_conv_relu_neg_2_x_4d": lambda: (-2 * torch.randn(1, 3, 256, 256),), + } def __init__(self): super().__init__() @@ -165,12 +169,12 @@ class ComboConvAvgPool2d(torch.nn.Module): "executorch_exir_dialects_edge__ops_aten_avg_pool2d_default", ] - test_data = [ - (20 * torch.randn(1, 3, 64, 32),), - (torch.randn(1, 3, 100, 200),), - (5 * torch.randn(1, 3, 256, 256),), - (torch.rand(1, 3, 512, 128),), - ] + test_data = { + "combo_conv_avgpool_20_x_4d": lambda: (20 * torch.randn(1, 3, 64, 32),), + "combo_conv_avgpool_4d": lambda: (torch.randn(1, 3, 100, 200),), + "combo_conv_avgpool_5_x_4d_randn": lambda: (5 * torch.randn(1, 3, 256, 256),), + "combo_conv_avgpool_2_x_4d": lambda: (torch.rand(1, 3, 512, 128),), + } def __init__(self): 
super().__init__() @@ -185,238 +189,291 @@ def forward(self, x): return x -class TestConvCombos(unittest.TestCase): - """Tests conv combined with other ops.""" - - def _test_conv_combo_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec( - "TOSA-0.80+MI", - ), - ) - .export() - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .check_not(list(module.edge_op_list)) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_conv_combo_tosa_BI_pipeline( - self, - module: torch.nn.Module, - test_data: Tuple[torch.Tensor], - atol: float = 1e-3, - rtol: float = 1e-3, - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec( - "TOSA-0.80+BI", - ), - ) - .quantize() - .export() - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .check_not(list(module.edge_op_list)) - .to_executorch() - .run_method_and_compare_outputs( - inputs=test_data, atol=atol, rtol=rtol, qtol=1 - ) - ) - - def _test_conv_combo_ethos_BI_pipeline( - self, - module: torch.nn.Module, - compile_spec: CompileSpec, - test_data: Tuple[torch.Tensor], - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .check_not(list(module.edge_op_list)) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - #################### - ## Conv + meandim ## - #################### - def test_conv_meandim_tosa_MI(self): - model = ComboConv2dMeandim() - self._test_conv_combo_tosa_MI_pipeline(model, model.get_inputs()) - - def test_conv_meandim_tosa_BI(self): - model = ComboConv2dMeandim() - self._test_conv_combo_tosa_BI_pipeline(model, model.get_inputs()) - - @pytest.mark.corstone_fvp - def test_conv_meandim_u55_BI(self): - model = ComboConv2dMeandim() - self._test_conv_combo_ethos_BI_pipeline( - model, - common.get_u55_compile_spec(), - model.get_inputs(), - ) - - @pytest.mark.corstone_fvp - def test_conv_meandim_u85_BI(self): - model = ComboConv2dMeandim() - self._test_conv_combo_ethos_BI_pipeline( - model, - common.get_u85_compile_spec(), - model.get_inputs(), - ) - - ############################## - ## Conv + batch norm + relu ## - ############################## - affine_params = [("affine", True), ("_no_affine", False)] - - @parameterized.expand(affine_params) - def test_conv_batchnorm_relu6_tosa_MI(self, test_suffix, affine): - model = ComboConvBatchnormRelu6(affine) - self._test_conv_combo_tosa_MI_pipeline(model, model.get_inputs()) - - @parameterized.expand(affine_params) - def test_conv_batchnorm_relu6_tosa_BI(self, test_suffix, affine): - model = ComboConvBatchnormRelu6(affine) - self._test_conv_combo_tosa_BI_pipeline(model, model.get_inputs()) - - @parameterized.expand(affine_params) - @pytest.mark.corstone_fvp - def test_conv_batchnorm_relu6_u55_BI(self, test_suffix, affine): - model = ComboConvBatchnormRelu6(affine) - self._test_conv_combo_ethos_BI_pipeline( - model, common.get_u55_compile_spec(), model.get_inputs() - ) - - @parameterized.expand(affine_params) - @pytest.mark.corstone_fvp - def test_conv_batchnorm_relu_u85_BI(self, test_suffix, affine): - model = 
ComboConvBatchnormRelu6(affine) - self._test_conv_combo_ethos_BI_pipeline( - model, - common.get_u85_compile_spec(), - model.get_inputs(), - ) - - ################## - ## Conv + ReLU6 ## - ################## - @parameterized.expand(ComboConvRelu6.test_data) - def test_conv_relu6_tosa_MI(self, test_data: torch.Tensor): - model = ComboConvRelu6() - test_data = (test_data,) - self._test_conv_combo_tosa_MI_pipeline(model, test_data) - - @parameterized.expand(ComboConvRelu6.test_data) - def test_conv_relu6_tosa_BI(self, test_data: torch.Tensor): - model = ComboConvRelu6() - test_data = (test_data,) - self._test_conv_combo_tosa_BI_pipeline(model, test_data) - - @parameterized.expand(ComboConvRelu6.test_data) - @pytest.mark.corstone_fvp - def test_conv_relu6_u55_BI(self, test_data: torch.Tensor): - model = ComboConvRelu6() - test_data = (test_data,) - self._test_conv_combo_ethos_BI_pipeline( - model, common.get_u55_compile_spec(), test_data - ) - - @parameterized.expand(ComboConvRelu6.test_data) - @pytest.mark.corstone_fvp - def test_conv_relu6_u85_BI(self, test_data: torch.Tensor): - model = ComboConvRelu6() - test_data = (test_data,) - self._test_conv_combo_ethos_BI_pipeline( - model, common.get_u85_compile_spec(), test_data - ) - - ############################### - ## Block bottleneck residual ## - ############################### - def test_block_bottleneck_residual_tosa_MI(self): - model = ComboBlockBottleneckResidual() - self._test_conv_combo_tosa_MI_pipeline(model, model.get_inputs()) - - @pytest.mark.flaky # TODO: Investigate flakyness (MLTORCH-307) - def test_block_bottleneck_residual_tosa_BI(self): - model = ComboBlockBottleneckResidual() - self._test_conv_combo_tosa_BI_pipeline(model, model.get_inputs()) - - @pytest.mark.corstone_fvp - def test_block_bottleneck_residual_u55_BI(self): - model = ComboBlockBottleneckResidual() - self._test_conv_combo_ethos_BI_pipeline( - model, - common.get_u55_compile_spec(), - model.get_inputs(), - ) - - @pytest.mark.corstone_fvp - def test_block_bottleneck_residual_u85_BI(self): - model = ComboBlockBottleneckResidual() - self._test_conv_combo_ethos_BI_pipeline( - model, - common.get_u85_compile_spec(), - model.get_inputs(), - ) - - ###################### - ## Conv + AvgPool2d ## - ###################### - @parameterized.expand(ComboConvAvgPool2d.test_data) - def test_conv_avgpool2d_tosa_MI(self, test_data: torch.Tensor): - model = ComboConvAvgPool2d() - test_data = (test_data,) - self._test_conv_combo_tosa_MI_pipeline(model, test_data) - - @parameterized.expand(ComboConvAvgPool2d.test_data) - def test_conv_avgpool2d_tosa_BI(self, test_data: torch.Tensor): - model = ComboConvAvgPool2d() - test_data = (test_data,) - self._test_conv_combo_tosa_BI_pipeline(model, test_data) - - @parameterized.expand(ComboConvAvgPool2d.test_data) - @pytest.mark.corstone_fvp - def test_conv_avgpool2d_u55_BI(self, test_data: torch.Tensor): - model = ComboConvAvgPool2d() - test_data = (test_data,) - self._test_conv_combo_ethos_BI_pipeline( - model, - common.get_u55_compile_spec(), - test_data, - ) - - @parameterized.expand(ComboConvAvgPool2d.test_data) - @pytest.mark.corstone_fvp - def test_conv_avgpool2d_u85_BI(self, test_data: torch.Tensor): - model = ComboConvAvgPool2d() - test_data = (test_data,) - self._test_conv_combo_ethos_BI_pipeline( - model, - common.get_u85_compile_spec(), - test_data, - ) +#################### +## Conv + meandim ## +#################### + + +def test_convolution_2d_tosa_MI_meandim(): + model = ComboConv2dMeandim() + + pipeline = 
TosaPipelineMI[input_t1]( + model, + model.get_inputs(), + aten_op=[], + exir_op=ComboConv2dMeandim.edge_op_list, + ) + pipeline.run() + + +def test_convolution_2d_tosa_BI_meandim(): + model = ComboConv2dMeandim() + pipeline = TosaPipelineBI[input_t1]( + model, + model.get_inputs(), + aten_op=[], + exir_op=ComboConv2dMeandim.edge_op_list, + ) + pipeline.run() + + +@common.XfailIfNoCorstone300 +def test_convolution_2d_u55_BI_meandim(): + model = ComboConv2dMeandim() + pipeline = EthosU55PipelineBI[input_t1]( + model, + model.get_inputs(), + aten_ops=[], + exir_ops=ComboConv2dMeandim.edge_op_list, + run_on_fvp=True, + ) + pipeline.run() + + +@common.XfailIfNoCorstone320 +def test_convolution_2d_u85_BI_meandim(): + model = ComboConv2dMeandim() + pipeline = EthosU85PipelineBI[input_t1]( + model, + model.get_inputs(), + aten_ops=[], + exir_ops=ComboConv2dMeandim.edge_op_list, + run_on_fvp=True, + ) + pipeline.run() + + +############################## +## Conv + batch norm + relu ## +############################## +affine_params = {"affine": True, "_no_affine": False} + + +@common.parametrize("affine", affine_params) +def test_convolution_2d_tosa_MI_batchnorm_relu6(affine): + model = ComboConvBatchnormRelu6(affine) + pipeline = TosaPipelineMI[input_t1]( + model, + model.get_inputs(), + aten_op=[], + exir_op=ComboConvBatchnormRelu6.edge_op_list, + ) + pipeline.run() + + +@pytest.mark.flaky(reruns=5) # TODO: Investigate flakyness (MLTORCH-307) +@common.parametrize("affine", affine_params) +def test_convolution_2d_tosa_BI_batchnorm_relu6(affine): + model = ComboConvBatchnormRelu6(affine) + pipeline = TosaPipelineBI[input_t1]( + model, + model.get_inputs(), + aten_op=[], + exir_op=ComboConvBatchnormRelu6.edge_op_list, + ) + pipeline.run() + + +@common.parametrize("affine", affine_params) +@common.XfailIfNoCorstone300 +def test_convolution_2d_u55_BI_batchnorm_relu6(affine): + model = ComboConvBatchnormRelu6(affine) + pipeline = EthosU55PipelineBI[input_t1]( + model, + model.get_inputs(), + aten_ops=[], + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("affine", affine_params) +@common.XfailIfNoCorstone320 +def test_convolution_2d_u85_BI_batchnorm_relu6(affine): + model = ComboConvBatchnormRelu6(affine) + pipeline = EthosU85PipelineBI[input_t1]( + model, + model.get_inputs(), + aten_ops=[], + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +################## +## Conv + ReLU6 ## +################## + + +@common.parametrize("test_data", ComboConvRelu6.test_data) +def test_convolution_2d_tosa_MI_relu6(test_data: torch.Tensor): + model = ComboConvRelu6() + pipeline = TosaPipelineMI[input_t1]( + model, + test_data(), + aten_op=[], + exir_op=ComboConvRelu6.edge_op_list, + ) + pipeline.run() + + +@pytest.mark.flaky(reruns=5) # TODO: Investigate flakyness (MLTORCH-307) +@common.parametrize("test_data", ComboConvRelu6.test_data) +def test_convolution_2d_tosa_BI_relu6(test_data: torch.Tensor): + model = ComboConvRelu6() + pipeline = TosaPipelineBI[input_t1]( + model, + test_data(), + aten_op=[], + exir_op=ComboConvRelu6.edge_op_list, + ) + pipeline.run() + + +@common.parametrize("test_data", ComboConvRelu6.test_data) +@common.XfailIfNoCorstone300 +def test_convolution_2d_u55_BI_relu6(test_data: torch.Tensor): + model = ComboConvRelu6() + pipeline = EthosU55PipelineBI[input_t1]( + model, + test_data(), + aten_ops=[], + exir_ops=ComboConvRelu6.edge_op_list, + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", ComboConvRelu6.test_data) 
+@common.XfailIfNoCorstone320 +def test_convolution_2d_u85_BI_relu6(test_data: torch.Tensor): + model = ComboConvRelu6() + pipeline = EthosU85PipelineBI[input_t1]( + model, + test_data(), + aten_ops=[], + exir_ops=ComboConvRelu6.edge_op_list, + run_on_fvp=True, + ) + pipeline.run() + + +############################### +## Block bottleneck residual ## +############################### +def test_convolution_2d_tosa_MI_block_bottleneck(): + model = ComboBlockBottleneckResidual() + pipeline = TosaPipelineMI[input_t1]( + model, + model.get_inputs(), + aten_op=[], + exir_op=ComboBlockBottleneckResidual.edge_op_list, + ) + pipeline.run() + + +@pytest.mark.flaky(reruns=5) # TODO: Investigate flakyness (MLTORCH-307) +def test_convolution_2d_tosa_BI_block_bottleneck(): + model = ComboBlockBottleneckResidual() + pipeline = TosaPipelineBI[input_t1]( + model, + model.get_inputs(), + aten_op=[], + exir_op=ComboBlockBottleneckResidual.edge_op_list, + ) + pipeline.change_args("run_method_and_compare_outputs", model.get_inputs(), qtol=1) + pipeline.run() + + +@common.XfailIfNoCorstone300 +def test_convolution_2d_u55_BI_block_bottleneck(): + model = ComboBlockBottleneckResidual() + pipeline = EthosU55PipelineBI[input_t1]( + model, + model.get_inputs(), + aten_ops=[], + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +@common.XfailIfNoCorstone320 +def test_convolution_2d_u85_BI_block_bottleneck(): + model = ComboBlockBottleneckResidual() + pipeline = EthosU85PipelineBI[input_t1]( + model, + model.get_inputs(), + aten_ops=[], + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +###################### +## Conv + AvgPool2d ## +###################### + + +@common.parametrize("test_data", ComboConvAvgPool2d.test_data) +def test_convolution_2d_tosa_MI_avgpool2d(test_data: torch.Tensor): + model = ComboConvAvgPool2d() + pipeline = TosaPipelineMI[input_t1]( + model, + test_data(), + aten_op=[], + exir_op=ComboConvAvgPool2d.edge_op_list, + ) + pipeline.run() + + +x_fails = { + "combo_conv_avgpool_20_x_4d": "AssertionError: Output 0 does not match reference output.", + "combo_conv_avgpool_4d": "AssertionError: Output 0 does not match reference output.", + "combo_conv_avgpool_5_x_4d_randn": "AssertionError: Output 0 does not match reference output.", + "combo_conv_avgpool_2_x_4d": "AssertionError: Output 0 does not match reference output.", +} + + +@pytest.mark.flaky(reruns=5) # TODO: Investigate flakyness (MLTORCH-307) +@common.parametrize("test_data", ComboConvAvgPool2d.test_data, x_fails) +def test_convolution_2d_tosa_BI_avgpool2d(test_data: torch.Tensor): + model = ComboConvAvgPool2d() + pipeline = TosaPipelineBI[input_t1]( + model, + test_data(), + aten_op=[], + exir_op=ComboConvAvgPool2d.edge_op_list, + ) + pipeline.run() + + +@common.parametrize("test_data", ComboConvAvgPool2d.test_data) +@common.XfailIfNoCorstone300 +def test_convolution_2d_u55_BI_avgpool2d(test_data: torch.Tensor): + model = ComboConvAvgPool2d() + pipeline = EthosU55PipelineBI[input_t1]( + model, + test_data(), + aten_ops=[], + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", ComboConvAvgPool2d.test_data) +@common.XfailIfNoCorstone320 +def test_convolution_2d_u85_BI_avgpool2d(test_data: torch.Tensor): + model = ComboConvAvgPool2d() + pipeline = EthosU85PipelineBI[input_t1]( + model, + test_data(), + aten_ops=[], + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_depthwise_conv.py b/backends/arm/test/ops/test_depthwise_conv.py index 
59ce628693c..91b3dde1bb2 100644 --- a/backends/arm/test/ops/test_depthwise_conv.py +++ b/backends/arm/test/ops/test_depthwise_conv.py @@ -1,24 +1,29 @@ # Copyright 2024-2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest - from typing import Tuple import pytest import torch -from executorch.backends.arm.test import common, conftest +from executorch.backends.arm.test import common + +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +input_t = Tuple[torch.Tensor] # Input x + +exir_op = "executorch_exir_dialects_edge__ops_aten_convolution_default" + from executorch.backends.arm.test.ops.test_conv1d import Conv1d from executorch.backends.arm.test.ops.test_conv2d import Conv2d -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.backend_details import CompileSpec -from parameterized import parameterized - """ The configuration when @@ -149,158 +154,93 @@ ) # Shenanigan to get a nicer output when test fails. -testsuite_conv2d = [ - ("2x2_1x6x4x4_gp6_st1", dw_conv2d_2x2_1x6x4x4_gp6_st1), - ("3x3_1x3x256x256_gp3_st1", dw_conv2d_3x3_1x3x256x256_gp3_st1), - ("3x3_1x4x256x256_gp4_nobias", dw_conv2d_3x3_1x4x256x256_gp4_nobias), - ("3x3_1x4x256x256_gp4_st1", dw_conv2d_3x3_1x4x256x256_gp4_st1), - ("3x3_2x8x198x198_gp8_st3", dw_conv2d_3x3_2x8x198x198_gp8_st3), - ("two_dw_conv2d", two_dw_conv2d), -] - -testsuite_conv2d_u85 = [ - ("2x2_1x6x4x4_gp6_st1", dw_conv2d_2x2_1x6x4x4_gp6_st1), - ("3x3_1x3x256x256_gp3_st1", dw_conv2d_3x3_1x3x256x256_gp3_st1), - ("3x3_1x4x256x256_gp4_st1", dw_conv2d_3x3_1x4x256x256_gp4_st1), - ("3x3_1x4x256x256_gp4_nobias", dw_conv2d_3x3_1x4x256x256_gp4_nobias), -] - -testsuite_conv2d_u85_xfails = [ - ("3x3_2x8x198x198_gp8_st3", dw_conv2d_3x3_2x8x198x198_gp8_st3), - ("two_dw_conv2d", two_dw_conv2d), -] - - -testsuite_conv1d = [ - ("2_1x6x4_gp6_st1", dw_conv1d_2_1x6x4_gp6_st1), - ("two_dw_conv1d", two_dw_conv1d), - ("3_1x3x256_gp3_st1", dw_conv1d_3_1x3x256_gp3_st1), - ("3_1x3x14_gp3_st1", dw_conv1d_3_1x3x14_gp3_st1), -] - - -class TestDepthwiseConv(unittest.TestCase): - """Tests Conv1D and Conv2D where groups == in_channels and out_channels = K * in_channels. 
This - is a special case enables depthwise convolution.""" - - def _test_dw_conv_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec( - "TOSA-0.80+MI", - ), - ) - .export() - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_dw_conv_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec( - "TOSA-0.80+BI", - ), - ) - .quantize() - .export() - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, qtol=1) - ) - - def _test_dw_conv_ethos_BI_pipeline( - self, - module: torch.nn.Module, - compile_spec: CompileSpec, - test_data: Tuple[torch.Tensor], - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - @parameterized.expand(testsuite_conv1d + testsuite_conv2d) - def test_dw_conv_tosa_MI(self, test_name: str, model: torch.nn.Module): - self._test_dw_conv_tosa_MI_pipeline(model, model.get_inputs()) - - @parameterized.expand(testsuite_conv1d + testsuite_conv2d) - @pytest.mark.flaky # TODO: Investigate flakyness (MLTORCH-307) - def test_dw_conv_tosa_BI(self, test_name: str, model: torch.nn.Module): - self._test_dw_conv_tosa_BI_pipeline(model, model.get_inputs()) - - @parameterized.expand(testsuite_conv2d[:4], skip_on_empty=True) - @pytest.mark.corstone_fvp - def test_dw_conv2d_u55_BI(self, test_name: str, model: torch.nn.Module): - self._test_dw_conv_ethos_BI_pipeline( - model, - common.get_u55_compile_spec(), - model.get_inputs(), - ) - - @parameterized.expand(testsuite_conv2d[4:], skip_on_empty=True) - @pytest.mark.corstone_fvp - @conftest.expectedFailureOnFVP # TODO: MLETORCH-516 - def test_dw_conv2d_u55_BI_xfails(self, test_name: str, model: torch.nn.Module): - self._test_dw_conv_ethos_BI_pipeline( - model, - common.get_u55_compile_spec(), - model.get_inputs(), - ) - - @parameterized.expand(testsuite_conv1d, skip_on_empty=True) - @pytest.mark.corstone_fvp - def test_dw_conv1d_u55_BI(self, test_name: str, model: torch.nn.Module): - self._test_dw_conv_ethos_BI_pipeline( - model, - common.get_u55_compile_spec(), - model.get_inputs(), - ) - - @parameterized.expand(testsuite_conv1d + testsuite_conv2d_u85) - @pytest.mark.corstone_fvp - def test_dw_conv_u85_BI(self, test_name: str, model: torch.nn.Module): - self._test_dw_conv_ethos_BI_pipeline( - model, - common.get_u85_compile_spec(), - model.get_inputs(), - ) - - # All test cases except 3x3_1x3x256x256_gp3_st1 have numerical issues on FVP. 
MLETORCH-520 - @parameterized.expand(testsuite_conv2d_u85_xfails) - @pytest.mark.corstone_fvp - @conftest.expectedFailureOnFVP - def test_dw_conv_u85_BI_xfails(self, test_name: str, model: torch.nn.Module): - self._test_dw_conv_ethos_BI_pipeline( - model, - common.get_u85_compile_spec(), - model.get_inputs(), - ) +testsuite_conv2d = { + "2x2_1x6x4x4_gp6_st1": lambda: dw_conv2d_2x2_1x6x4x4_gp6_st1, + "3x3_1x3x256x256_gp3_st1": lambda: dw_conv2d_3x3_1x3x256x256_gp3_st1, + "3x3_1x4x256x256_gp4_nobias": lambda: dw_conv2d_3x3_1x4x256x256_gp4_nobias, + "3x3_1x4x256x256_gp4_st1": lambda: dw_conv2d_3x3_1x4x256x256_gp4_st1, + "3x3_2x8x198x198_gp8_st3": lambda: dw_conv2d_3x3_2x8x198x198_gp8_st3, + "two_dw_conv2d": lambda: two_dw_conv2d, +} + +testsuite_conv2d_u85 = { + "2x2_1x6x4x4_gp6_st1": lambda: dw_conv2d_2x2_1x6x4x4_gp6_st1, + "3x3_1x3x256x256_gp3_st1": lambda: dw_conv2d_3x3_1x3x256x256_gp3_st1, + "3x3_1x4x256x256_gp4_st1": lambda: dw_conv2d_3x3_1x4x256x256_gp4_st1, + "3x3_1x4x256x256_gp4_nobias": lambda: dw_conv2d_3x3_1x4x256x256_gp4_nobias, +} + +testsuite_conv1d = { + "2_1x6x4_gp6_st1": lambda: dw_conv1d_2_1x6x4_gp6_st1, + "two_dw_conv1d": lambda: two_dw_conv1d, + "3_1x3x256_gp3_st1": lambda: dw_conv1d_3_1x3x256_gp3_st1, + "3_1x3x14_gp3_st1": lambda: dw_conv1d_3_1x3x14_gp3_st1, +} + + +@common.parametrize("test_module", testsuite_conv1d | testsuite_conv2d) +def test_convolution_2d_tosa_MI_depth_wise(test_module: torch.nn.Module): + pipeline = TosaPipelineMI[input_t]( + test_module(), + test_module().get_inputs(), + aten_op=[], + exir_op=exir_op, + ) + pipeline.run() + + +@pytest.mark.flaky(reruns=5) # TODO: Investigate flakyness (MLTORCH-307) +@common.parametrize("test_module", testsuite_conv1d | testsuite_conv2d) +def test_convolution_2d_tosa_BI_depth_wise(test_module: torch.nn.Module): + pipeline = TosaPipelineBI[input_t]( + test_module(), + test_module().get_inputs(), + aten_op=[], + exir_op=exir_op, + ) + pipeline.run() + + +x_fails = { + "3x3_2x8x198x198_gp8_st3": "MLETORCH-516: AssertionError: Output 0 does not match reference output.", + "two_dw_conv2d": "MLETORCH-516: AssertionError: Output 0 does not match reference output.", +} + + +@common.parametrize("test_module", testsuite_conv2d, x_fails) +@common.XfailIfNoCorstone300 # TODO: MLETORCH-516 +def test_convolution_2d_u55_BI_depth_wise(test_module: torch.nn.Module): + pipeline = EthosU55PipelineBI[input_t]( + test_module(), + test_module().get_inputs(), + aten_ops=[], + exir_ops=exir_op, + run_on_fvp=True, + ) + pipeline.run() + + +@common.XfailIfNoCorstone300 # TODO: MLETORCH-516 +@common.parametrize("test_module", testsuite_conv1d) +def test_convolution_1d_u55_BI_depth_wise(test_module: torch.nn.Module): + pipeline = EthosU55PipelineBI[input_t]( + test_module(), + test_module().get_inputs(), + aten_ops=[], + exir_ops=exir_op, + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_module", testsuite_conv1d | testsuite_conv2d, x_fails) +@common.XfailIfNoCorstone320 # TODO: MLETORCH-516 +def test_convolution_2d_u85_BI_depth_wise(test_module: torch.nn.Module): + pipeline = EthosU85PipelineBI[input_t]( + test_module(), + test_module().get_inputs(), + aten_ops=[], + exir_ops=exir_op, + run_on_fvp=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_div.py b/backends/arm/test/ops/test_div.py index d200a753ce5..087bdb84a63 100644 --- a/backends/arm/test/ops/test_div.py +++ b/backends/arm/test/ops/test_div.py @@ -1,243 +1,131 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. 
-# Copyright 2024-2025 Arm Limited and/or its affiliates. # All rights reserved. +# Copyright 2024-2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest from typing import Optional, Tuple, Union -import pytest - import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from parameterized import parameterized +from executorch.backends.arm.test import common + +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +aten_op = "torch.ops.aten.div.Tensor" +exir_op = "executorch_exir_dialects_edge__ops_aten_div_Tensor" +input_t1 = Tuple[torch.Tensor] # Input x -test_data_suite = [ +test_data_suite = { # (test_name, input, other, rounding_mode) See torch.div() for info - ( - "op_div_rank1_ones", - torch.ones(5), - torch.ones(5), - None, - ), - ( - "op_div_rank1_negative_ones", + "op_div_rank1_ones": lambda: (torch.ones(5), torch.ones(5), None), + "op_div_rank1_negative_ones": lambda: ( torch.ones(5) * (-1), torch.ones(5) * (-1), None, ), - ( - "op_div_rank1_rand", + "op_div_rank1_rand": lambda: ( torch.rand(5) * 5, torch.rand(5) * 5, None, ), - ( - "op_div_rank4_ones", + "op_div_rank4_ones": lambda: ( torch.ones(5, 10, 25, 20), torch.ones(5, 10, 25, 20), None, ), - ( - "op_div_rank4_negative_ones", + "op_div_rank4_negative_ones": lambda: ( (-1) * torch.ones(5, 10, 25, 20), torch.ones(5, 10, 25, 20), None, ), - ( - "op_div_rank4_ones_div_negative", + "op_div_rank4_ones_div_negative": lambda: ( torch.ones(5, 10, 25, 20), (-1) * torch.ones(5, 10, 25, 20), None, ), - ( - "op_div_rank4_large_rand", + "op_div_rank4_large_rand": lambda: ( 200 * torch.rand(5, 10, 25, 20), torch.rand(5, 10, 25, 20), None, ), - ( - "op_div_rank4_negative_large_rand", + "op_div_rank4_negative_large_rand": lambda: ( (-200) * torch.rand(5, 10, 25, 20), torch.rand(5, 10, 25, 20), None, ), - ( - "op_div_rank4_large_randn", + "op_div_rank4_large_randn": lambda: ( 200 * torch.randn(5, 10, 25, 20) + 1, torch.rand(5, 10, 25, 20) + 1, None, ), -] - - -class TestDiv(unittest.TestCase): - """Tests division""" - - class Div(torch.nn.Module): - - def forward( - self, - input_: Union[torch.Tensor, torch.types.Number], - other_: Union[torch.Tensor, torch.types.Number], - rounding_mode: Optional[str] = None, - ): - if rounding_mode is None: - return torch.div(input=input_, other=other_) - else: - return torch.div( - input=input_, other=other_, rounding_mode=rounding_mode - ) - - def _test_div_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.div.Tensor": 1}) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_div_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check_count( - {"torch.ops.aten.reciprocal.default": 1, "torch.ops.aten.mul.Tensor": 1} - ) - 
.check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, atol=1, rtol=0.1) - ) - - def _test_div_ethos_BI_pipeline( - self, module: torch.nn.Module, compile_spec, test_data: Tuple[torch.Tensor] - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_u55_compile_spec(), - ) - .quantize() - .export() - .check_count( - {"torch.ops.aten.reciprocal.default": 1, "torch.ops.aten.mul.Tensor": 1} - ) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - @parameterized.expand(test_data_suite) - def test_div_tosa_MI( - self, - test_name: str, - input_: Union[torch.Tensor, torch.types.Number], - other_: Union[torch.Tensor, torch.types.Number], - rounding_mode: Optional[str] = None, - ): - test_data = (input_, other_) - self._test_div_tosa_MI_pipeline(self.Div(), test_data) +} - @parameterized.expand(test_data_suite) - def test_div_tosa_BI( - self, - test_name: str, - input_: Union[torch.Tensor, torch.types.Number], - other_: Union[torch.Tensor, torch.types.Number], - rounding_mode: Optional[str] = None, - ): - test_data = (input_, other_) - self._test_div_tosa_BI_pipeline(self.Div(), test_data) +class Div(torch.nn.Module): - @parameterized.expand(test_data_suite[:3]) - @pytest.mark.corstone_fvp - def test_div_u55_BI( - self, - test_name: str, - input_: Union[torch.Tensor, torch.types.Number], - other_: Union[torch.Tensor, torch.types.Number], - rounding_mode: Optional[str] = None, - ): - test_data = (input_, other_) - self._test_div_ethos_BI_pipeline( - self.Div(), common.get_u55_compile_spec(), test_data - ) - - # Numerical issues on FVP likely due to mul op, MLETORCH-521 - @parameterized.expand(test_data_suite[3:]) - @pytest.mark.corstone_fvp - @conftest.expectedFailureOnFVP - def test_div_u55_BI_xfails( - self, - test_name: str, - input_: Union[torch.Tensor, torch.types.Number], - other_: Union[torch.Tensor, torch.types.Number], - rounding_mode: Optional[str] = None, - ): - test_data = (input_, other_) - self._test_div_ethos_BI_pipeline( - self.Div(), common.get_u55_compile_spec(), test_data - ) - - @parameterized.expand(test_data_suite[:3]) - @pytest.mark.corstone_fvp - def test_div_u85_BI( - self, - test_name: str, - input_: Union[torch.Tensor, torch.types.Number], - other_: Union[torch.Tensor, torch.types.Number], - rounding_mode: Optional[str] = None, - ): - test_data = (input_, other_) - self._test_div_ethos_BI_pipeline( - self.Div(), common.get_u85_compile_spec(), test_data - ) - - # Numerical issues on FVP likely due to mul op, MLETORCH-521 - @parameterized.expand(test_data_suite[3:]) - @pytest.mark.corstone_fvp - @conftest.expectedFailureOnFVP - def test_div_u85_BI_xfails( + def forward( self, - test_name: str, input_: Union[torch.Tensor, torch.types.Number], other_: Union[torch.Tensor, torch.types.Number], rounding_mode: Optional[str] = None, ): - test_data = (input_, other_) - self._test_div_ethos_BI_pipeline( - self.Div(), common.get_u85_compile_spec(), test_data - ) + if rounding_mode is None: + return torch.div(input=input_, other=other_) + else: + return torch.div(input=input_, other=other_, rounding_mode=rounding_mode) + + 
+@common.parametrize("test_data", test_data_suite) +def test_div_tensor_tosa_MI(test_data: Tuple): + pipeline = TosaPipelineMI[input_t1](Div(), test_data(), aten_op, exir_op) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_div_tensor_tosa_BI(test_data: Tuple): + pipeline = TosaPipelineBI[input_t1](Div(), test_data(), aten_op=[], exir_op=[]) + pipeline.run() + + +x_fails = { + "op_div_rank4_ones": "MLETORCH-521: Numerical issues on FVP likely due to mul op", + "op_div_rank4_negative_ones": "MLETORCH-521: Numerical issues on FVP likely due to mul op", + "op_div_rank4_ones_div_negative": "MLETORCH-521: Numerical issues on FVP likely due to mul op", + "op_div_rank4_large_rand": "MLETORCH-521: Numerical issues on FVP likely due to mul op", + "op_div_rank4_negative_large_rand": "MLETORCH-521: Numerical issues on FVP likely due to mul op", + "op_div_rank4_large_randn": "MLETORCH-521: Numerical issues on FVP likely due to mul op", +} + + +@common.parametrize("test_data", test_data_suite, xfails=x_fails) +@common.XfailIfNoCorstone300 +def test_div_tensor_u55_BI(test_data: Tuple): + pipeline = EthosU55PipelineBI[input_t1]( + Div(), + test_data(), + aten_ops=[], + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite, xfails=x_fails) +@common.XfailIfNoCorstone320 +def test_div_tensor_u85_BI(test_data: Tuple): + pipeline = EthosU85PipelineBI[input_t1]( + Div(), + test_data(), + aten_ops=[], + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_eq.py b/backends/arm/test/ops/test_eq.py index e3bcf877ffe..bd6cace00a5 100644 --- a/backends/arm/test/ops/test_eq.py +++ b/backends/arm/test/ops/test_eq.py @@ -15,7 +15,6 @@ TosaPipelineMI, ) - input_t = Tuple[torch.Tensor] @@ -63,24 +62,27 @@ def get_inputs(self): op_eq_scalar_rank4_randn = Equal(torch.randn(3, 2, 2, 2), 0.3) test_data_tensor = { - "eq_tensor_rank1_ones": op_eq_tensor_rank1_ones, - "eq_tensor_rank2_rand": op_eq_tensor_rank2_rand, - "eq_tensor_rank3_randn": op_eq_tensor_rank3_randn, - "eq_tensor_rank4_randn": op_eq_tensor_rank4_randn, + "eq_tensor_rank1_ones": lambda: op_eq_tensor_rank1_ones, + "eq_tensor_rank2_rand": lambda: op_eq_tensor_rank2_rand, + "eq_tensor_rank3_randn": lambda: op_eq_tensor_rank3_randn, + "eq_tensor_rank4_randn": lambda: op_eq_tensor_rank4_randn, } test_data_scalar = { - "eq_scalar_rank1_ones": op_eq_scalar_rank1_ones, - "eq_scalar_rank2_rand": op_eq_scalar_rank2_rand, - "eq_scalar_rank3_randn": op_eq_scalar_rank3_randn, - "eq_scalar_rank4_randn": op_eq_scalar_rank4_randn, + "eq_scalar_rank1_ones": lambda: op_eq_scalar_rank1_ones, + "eq_scalar_rank2_rand": lambda: op_eq_scalar_rank2_rand, + "eq_scalar_rank3_randn": lambda: op_eq_scalar_rank3_randn, + "eq_scalar_rank4_randn": lambda: op_eq_scalar_rank4_randn, } @common.parametrize("test_module", test_data_tensor) -def test_eq_tensor_tosa_MI(test_module): +def test_eq_scalar_tosa_MI_tensor(test_module): pipeline = TosaPipelineMI[input_t]( - test_module, test_module.get_inputs(), Equal.aten_op_Tensor, Equal.exir_op + test_module(), + test_module().get_inputs(), + Equal.aten_op_Tensor, + Equal.exir_op, ) pipeline.run() @@ -88,8 +90,8 @@ def test_eq_tensor_tosa_MI(test_module): @common.parametrize("test_module", test_data_scalar) def test_eq_scalar_tosa_MI(test_module): pipeline = TosaPipelineMI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), Equal.aten_op_Scalar, Equal.exir_op, ) @@ -97,9 +99,12 @@ 
def test_eq_scalar_tosa_MI(test_module): @common.parametrize("test_module", test_data_tensor) -def test_eq_tensor_tosa_BI(test_module): +def test_eq_scalar_tosa_BI_tensor(test_module): pipeline = TosaPipelineBI[input_t]( - test_module, test_module.get_inputs(), Equal.aten_op_Tensor, Equal.exir_op + test_module(), + test_module().get_inputs(), + Equal.aten_op_Tensor, + Equal.exir_op, ) pipeline.run() @@ -107,20 +112,24 @@ def test_eq_tensor_tosa_BI(test_module): @common.parametrize("test_module", test_data_scalar) def test_eq_scalar_tosa_BI(test_module): pipeline = TosaPipelineBI[input_t]( - test_module, test_module.get_inputs(), Equal.aten_op_Tensor, Equal.exir_op + test_module(), + test_module().get_inputs(), + Equal.aten_op_Tensor, + Equal.exir_op, ) pipeline.run() @common.parametrize("test_module", test_data_tensor) @common.XfailIfNoCorstone300 -def test_eq_tensor_u55_BI(test_module): +def test_eq_scalar_u55_BI_tensor(test_module): # EQUAL is not supported on U55. pipeline = OpNotSupportedPipeline[input_t]( - test_module, - test_module.get_inputs(), - "TOSA-0.80+BI+u55", + test_module(), + test_module().get_inputs(), {Equal.exir_op: 1}, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -130,11 +139,12 @@ def test_eq_tensor_u55_BI(test_module): def test_eq_scalar_u55_BI(test_module): # EQUAL is not supported on U55. pipeline = OpNotSupportedPipeline[input_t]( - test_module, - test_module.get_inputs(), - "TOSA-0.80+BI+u55", + test_module(), + test_module().get_inputs(), {Equal.exir_op: 1}, n_expected_delegates=1, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -148,10 +158,10 @@ def test_eq_scalar_u55_BI(test_module): strict=False, ) @common.XfailIfNoCorstone320 -def test_eq_tensor_u85_BI(test_module): +def test_eq_scalar_u85_BI_tensor(test_module): pipeline = EthosU85PipelineBI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), Equal.aten_op_Tensor, Equal.exir_op, run_on_fvp=True, @@ -170,8 +180,8 @@ def test_eq_tensor_u85_BI(test_module): @common.XfailIfNoCorstone320 def test_eq_scalar_u85_BI(test_module): pipeline = EthosU85PipelineBI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), Equal.aten_op_Tensor, Equal.exir_op, run_on_fvp=True, diff --git a/backends/arm/test/ops/test_erf.py b/backends/arm/test/ops/test_erf.py index d452be7cae1..e7136036c65 100644 --- a/backends/arm/test/ops/test_erf.py +++ b/backends/arm/test/ops/test_erf.py @@ -24,24 +24,24 @@ def forward(self, x: torch.Tensor): return torch.erf(x) test_data: dict[str, input_t1] = { - "zeros": (torch.zeros(1, 10, 10, 10),), - "ones": (torch.ones(10, 10, 10),), - "rand": ((torch.rand(10, 10) - 0.5),), - "randn_pos": ((torch.randn(1, 4, 4, 4) + 10),), - "randn_neg": ((torch.randn(1, 4, 4, 4) - 10),), - "ramp": (torch.arange(-16, 16, 0.2),), + "zeros": lambda: (torch.zeros(1, 10, 10, 10),), + "ones": lambda: (torch.ones(10, 10, 10),), + "rand": lambda: ((torch.rand(10, 10) - 0.5),), + "randn_pos": lambda: ((torch.randn(1, 4, 4, 4) + 10),), + "randn_neg": lambda: ((torch.randn(1, 4, 4, 4) - 10),), + "ramp": lambda: (torch.arange(-16, 16, 0.2),), } @common.parametrize("test_data", Erf.test_data) def test_erf_tosa_MI(test_data: input_t1): - pipeline = TosaPipelineMI[input_t1](Erf(), test_data, aten_op, exir_op) + pipeline = TosaPipelineMI[input_t1](Erf(), test_data(), aten_op, exir_op) pipeline.run() @common.parametrize("test_data", Erf.test_data) def test_erf_tosa_BI(test_data: input_t1): - pipeline = 
TosaPipelineBI[input_t1](Erf(), test_data, aten_op, exir_op) + pipeline = TosaPipelineBI[input_t1](Erf(), test_data(), aten_op, exir_op) pipeline.run() @@ -49,7 +49,7 @@ def test_erf_tosa_BI(test_data: input_t1): @common.XfailIfNoCorstone300 def test_erf_u55_BI(test_data: input_t1): pipeline = EthosU55PipelineBI[input_t1]( - Erf(), test_data, aten_op, exir_op, run_on_fvp=True + Erf(), test_data(), aten_op, exir_op, run_on_fvp=True ) pipeline.run() @@ -58,6 +58,6 @@ def test_erf_u55_BI(test_data: input_t1): @common.XfailIfNoCorstone320 def test_erf_u85_BI(test_data: input_t1): pipeline = EthosU85PipelineBI[input_t1]( - Erf(), test_data, aten_op, exir_op, run_on_fvp=True + Erf(), test_data(), aten_op, exir_op, run_on_fvp=True ) pipeline.run() diff --git a/backends/arm/test/ops/test_exp.py b/backends/arm/test/ops/test_exp.py index 3fa9f8c99fa..9218455916a 100644 --- a/backends/arm/test/ops/test_exp.py +++ b/backends/arm/test/ops/test_exp.py @@ -1,127 +1,85 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. -# Copyright 2024 Arm Limited and/or its affiliates. # All rights reserved. +# Copyright 2024-2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest from typing import Tuple -import pytest - import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.backend_details import CompileSpec -from parameterized import parameterized -test_data_suite = [ +from executorch.backends.arm.test import common +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +test_data_suite = { # (test_name, test_data) - ("zeros", torch.zeros(1, 10, 10, 10)), - ("ones", torch.ones(10, 10, 10)), - ("rand", torch.rand(10, 10) - 0.5), - ("randn_pos", torch.randn(1, 4, 4, 4) + 10), - ("randn_neg", torch.randn(10) - 10), - ("ramp", torch.arange(-16, 16, 0.2)), -] - - -class TestExp(unittest.TestCase): - """Tests lowering of aten.exp""" - - class Exp(torch.nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - return torch.exp(x) - - def _test_exp_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check(["torch.ops.aten.exp.default"]) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_exp_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_exp_tosa_BI_pipeline(self, module: torch.nn.Module, test_data: Tuple): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check(["torch.ops.aten.exp.default"]) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_exp_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_exp_ethosu_BI_pipeline( - self, - compile_spec: CompileSpec, - module: torch.nn.Module, - test_data: 
Tuple[torch.tensor], - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_u55_compile_spec(), - ) - .quantize() - .export() - .check_count({"torch.ops.aten.exp.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_exp_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - @parameterized.expand(test_data_suite) - def test_exp_tosa_MI( - self, - test_name: str, - test_data: torch.Tensor, - ): - self._test_exp_tosa_MI_pipeline(self.Exp(), (test_data,)) - - @parameterized.expand(test_data_suite) - def test_exp_tosa_BI(self, test_name: str, test_data: torch.Tensor): - self._test_exp_tosa_BI_pipeline(self.Exp(), (test_data,)) - - @parameterized.expand(test_data_suite) - @pytest.mark.corstone_fvp - def test_exp_tosa_u55_BI(self, test_name: str, test_data: torch.Tensor): - self._test_exp_ethosu_BI_pipeline( - common.get_u55_compile_spec(), self.Exp(), (test_data,) - ) - - @parameterized.expand(test_data_suite) - @pytest.mark.corstone_fvp - def test_exp_tosa_u85_BI(self, test_name: str, test_data: torch.Tensor): - self._test_exp_ethosu_BI_pipeline( - common.get_u85_compile_spec(), self.Exp(), (test_data,) - ) + "zeros": lambda: torch.zeros(1, 10, 10, 10), + "ones": lambda: torch.ones(10, 10, 10), + "rand": lambda: torch.rand(10, 10) - 0.5, + "randn_pos": lambda: torch.randn(1, 4, 4, 4) + 10, + "randn_neg": lambda: torch.randn(10) - 10, + "ramp": lambda: torch.arange(-16, 16, 0.2), +} + +aten_op = "torch.ops.aten.exp.default" +input_t1 = Tuple[torch.Tensor] # Input x + + +class Exp(torch.nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + return torch.exp(x) + + +@common.parametrize("test_data", test_data_suite) +def test_exp_tosa_MI(test_data: Tuple): + pipeline = TosaPipelineMI[input_t1]( + Exp(), + (test_data(),), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_exp_tosa_BI(test_data: Tuple): + pipeline = TosaPipelineBI[input_t1]( + Exp(), + (test_data(),), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone300 +def test_exp_u55_BI(test_data: Tuple): + pipeline = EthosU55PipelineBI[input_t1]( + Exp(), + (test_data(),), + aten_op, + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone320 +def test_exp_u85_BI(test_data: Tuple): + pipeline = EthosU85PipelineBI[input_t1]( + Exp(), + (test_data(),), + aten_op, + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_expand.py b/backends/arm/test/ops/test_expand.py index cd073bddcc8..8f84c39dd27 100644 --- a/backends/arm/test/ops/test_expand.py +++ b/backends/arm/test/ops/test_expand.py @@ -7,7 +7,6 @@ # Tests the expand op which copies the data of the input tensor (possibly with new data format) # -import unittest from typing import Sequence, Tuple @@ -15,153 +14,121 @@ import torch -from executorch.backends.arm.quantizer import ( - EthosUQuantizer, - get_symmetric_quantization_config, - TOSAQuantizer, +from executorch.backends.arm.test import common +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, 
+ TosaPipelineMI, ) -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.backends.arm.tosa_specification import TosaSpecification - -from executorch.backends.xnnpack.test.tester.tester import Quantize -from executorch.exir.backend.backend_details import CompileSpec -from parameterized import parameterized - - -class TestSimpleExpand(unittest.TestCase): - """Tests the Tensor.expand which should be converted to a repeat op by a pass.""" - - class Expand(torch.nn.Module): - # (input tensor, multiples) - test_parameters = [ - (torch.rand(1), (2,)), - (torch.randn(1), (2, 2, 4)), - (torch.randn(1, 1, 1, 5), (1, 4, -1, -1)), - (torch.randn(1, 1), (1, 2, 2, 4)), - (torch.randn(1, 1), (2, 2, 2, 4)), - (torch.randn(10, 1, 1, 97), (-1, 4, -1, -1)), - (torch.rand(1, 1, 2, 2), (4, 3, -1, 2)), - (torch.randn(1, 4), (1, -1)), - (torch.randn(1, 1, 192), (1, -1, -1)), - ] - - def forward(self, x: torch.Tensor, m: Sequence): - return x.expand(m) - - def _test_expand_tosa_MI_pipeline(self, module: torch.nn.Module, test_data: Tuple): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.expand.default": 1}) - .to_edge() - .partition() - .check_not(["torch.ops.aten.expand.default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_expand_tosa_BI_pipeline(self, module: torch.nn.Module, test_data: Tuple): - tosa_spec = TosaSpecification.create_from_string("TOSA-0.80+BI") - compile_spec = common.get_tosa_compile_spec(tosa_spec) - quantizer = TOSAQuantizer(tosa_spec).set_io(get_symmetric_quantization_config()) - ( - ArmTester(module, example_inputs=test_data, compile_spec=compile_spec) - .quantize(Quantize(quantizer, get_symmetric_quantization_config())) - .export() - .check_count({"torch.ops.aten.expand.default": 1}) - .to_edge() - .partition() - .check_not(["torch.ops.aten.expand.default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, qtol=1) - ) - - def _test_expand_ethosu_BI_pipeline( - self, compile_spec: CompileSpec, module: torch.nn.Module, test_data: Tuple - ): - quantizer = EthosUQuantizer(compile_spec).set_io( - get_symmetric_quantization_config() - ) - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize(Quantize(quantizer, get_symmetric_quantization_config())) - .export() - .check_count({"torch.ops.aten.expand.default": 1}) - .to_edge() - .partition() - .check_not(["torch.ops.aten.expand.default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - @parameterized.expand(Expand.test_parameters) - def test_expand_tosa_MI(self, test_input, multiples): - self._test_expand_tosa_MI_pipeline(self.Expand(), (test_input, multiples)) - - @parameterized.expand(Expand.test_parameters) - def test_expand_tosa_BI(self, test_input, multiples): - self._test_expand_tosa_BI_pipeline(self.Expand(), (test_input, multiples)) - - @parameterized.expand(Expand.test_parameters[:-5]) - @pytest.mark.corstone_fvp - def test_expand_u55_BI(self, test_input, multiples): - 
self._test_expand_ethosu_BI_pipeline( - common.get_u55_compile_spec(), self.Expand(), (test_input, multiples) - ) - - # MLETORCH-629: Expand does not work on FVP with batch>1 - @parameterized.expand(Expand.test_parameters[-5:-2]) - @pytest.mark.corstone_fvp - @conftest.expectedFailureOnFVP - def test_expand_u55_BI_xfails_on_fvp(self, test_input, multiples): - self._test_expand_ethosu_BI_pipeline( - common.get_u55_compile_spec(), self.Expand(), (test_input, multiples) - ) - - @parameterized.expand(Expand.test_parameters[-2:]) - @pytest.mark.xfail( - reason="MLETORCH-716: Node will be optimized away and Vela can't handle empty graphs" + +aten_op = "torch.ops.aten.expand.default" +input_t1 = Tuple[torch.Tensor, torch.Tensor] # Input x, Input y + + +class Expand(torch.nn.Module): + # (input tensor, multiples) + test_parameters = { + "rand_1d_both": lambda: (torch.rand(1), (2,)), + "rand_1d": lambda: (torch.randn(1), (2, 2, 4)), + "rand_4d": lambda: (torch.randn(1, 1, 1, 5), (1, 4, -1, -1)), + "rand_batch_1": lambda: (torch.randn(1, 1), (1, 2, 2, 4)), + "rand_batch_2": lambda: (torch.randn(1, 1), (2, 2, 2, 4)), + "rand_mix_neg": lambda: (torch.randn(10, 1, 1, 97), (-1, 4, -1, -1)), + "rand_small_neg": lambda: (torch.rand(1, 1, 2, 2), (4, 3, -1, 2)), + } + + test_reject_set = { + "rand_2d": lambda: (torch.randn(1, 4), (1, -1)), + "rand_neg_mul": lambda: (torch.randn(1, 1, 192), (1, -1, -1)), + } + + def forward(self, x: torch.Tensor, m: Sequence): + return x.expand(m) + + +@common.parametrize("test_data", Expand.test_parameters | Expand.test_reject_set) +def test_expand_tosa_MI(test_data: Tuple): + pipeline = TosaPipelineMI[input_t1]( + Expand(), + test_data(), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", Expand.test_parameters | Expand.test_reject_set) +def test_expand_tosa_BI(test_data: Tuple): + pipeline = TosaPipelineBI[input_t1]( + Expand(), + test_data(), + aten_op, + exir_op=[], + ) + pipeline.run() + + +x_fails = { + "rand_batch_2": "AssertionError: Output 0 does not match reference output.", + "rand_mix_neg": "AssertionError: Output 0 does not match reference output.", + "rand_small_neg": "AssertionError: Output 0 does not match reference output.", +} + + +@common.parametrize("test_data", Expand.test_parameters, x_fails) +@common.XfailIfNoCorstone300 +def test_expand_u55_BI(test_data: Tuple): + pipeline = EthosU55PipelineBI[input_t1]( + Expand(), + test_data(), + aten_op, + exir_ops=[], + run_on_fvp=True, ) - def test_expand_u55_BI_xfails(self, test_input, multiples): - self._test_expand_ethosu_BI_pipeline( - common.get_u55_compile_spec(), self.Expand(), (test_input, multiples) - ) - - @parameterized.expand(Expand.test_parameters[:-5]) - @pytest.mark.corstone_fvp - def test_expand_u85_BI(self, test_input, multiples): - self._test_expand_ethosu_BI_pipeline( - common.get_u85_compile_spec(), self.Expand(), (test_input, multiples) - ) - - # MLETORCH-629: Expand does not work on FVP with batch>1 - @parameterized.expand(Expand.test_parameters[-5:-2]) - @pytest.mark.corstone_fvp - @conftest.expectedFailureOnFVP - def test_expand_u85_BI_xfails_on_fvp(self, test_input, multiples): - self._test_expand_ethosu_BI_pipeline( - common.get_u85_compile_spec(), self.Expand(), (test_input, multiples) - ) - - @parameterized.expand(Expand.test_parameters[-2:]) - @pytest.mark.xfail( - reason="MLETORCH-716: Node will be optimized away and Vela can't handle empty graphs" + pipeline.run() + + +@common.parametrize("test_data", Expand.test_parameters, x_fails) 
+@common.XfailIfNoCorstone320 +def test_expand_u85_BI(test_data: Tuple): + pipeline = EthosU85PipelineBI[input_t1]( + Expand(), + test_data(), + aten_op, + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", Expand.test_reject_set) +@common.XfailIfNoCorstone300 +@pytest.mark.xfail( + reason="MLETORCH-716: Node will be optimized away and Vela can't handle empty graphs" +) +def test_expand_u55_BI_failure_set(test_data: Tuple): + pipeline = EthosU55PipelineBI[input_t1]( + Expand(), + test_data(), + aten_op, + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", Expand.test_reject_set) +@common.XfailIfNoCorstone320 +@pytest.mark.xfail( + reason="MLETORCH-716: Node will be optimized away and Vela can't handle empty graphs" +) +def test_expand_u85_BI_failure_set(test_data: Tuple): + pipeline = EthosU85PipelineBI[input_t1]( + Expand(), + test_data(), + aten_op, + exir_ops=[], + run_on_fvp=True, ) - def test_expand_u85_xfails(self, test_input, multiples): - self._test_expand_ethosu_BI_pipeline( - common.get_u85_compile_spec(), self.Expand(), (test_input, multiples) - ) + pipeline.run() diff --git a/backends/arm/test/ops/test_full.py b/backends/arm/test/ops/test_full.py index 193ed632ed0..13a3146f2fe 100644 --- a/backends/arm/test/ops/test_full.py +++ b/backends/arm/test/ops/test_full.py @@ -8,186 +8,199 @@ # The shape and value are set at compile time, i.e. can't be set by a tensor input. # -import unittest - from typing import Tuple import pytest import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - - -class TestFull(unittest.TestCase): - """Tests the full op which creates a tensor of a given shape filled with a given value.""" - - class Full(torch.nn.Module): - # A single full op - def forward(self): - return torch.full((3, 3), 4.5) - - class AddConstFull(torch.nn.Module): - # Input + a full with constant value. - def forward(self, x: torch.Tensor): - return torch.full((2, 2, 3, 3), 4.5, dtype=torch.float32) + x - - class AddVariableFull(torch.nn.Module): - sizes: list[tuple[int, ...]] = [ - (5,), - (5, 5), - (5, 5, 5), - (1, 5, 5, 5), - ] - test_parameters = [((torch.randn(n) * 10 - 5, 3.2),) for n in sizes] - - def forward(self, x: torch.Tensor, y): - # Input + a full with the shape from the input and a given value 'y'. - return x + torch.full(x.shape, y) - - class FullLike(torch.nn.Module): - """Since full_like is replaced with full, we only need to test on reference model, not FVP.""" - - test_parameters = [ - ((torch.randn(2, 2, 2, 2) * 50, 3.2),), - ((torch.randn(2, 2, 2, 2) * 50, 3),), - (((torch.randn(2, 2, 2, 2) * 50).to(torch.int32), 3.2),), - (((torch.randn(2, 2, 2, 2) * 50).to(torch.int32), 3),), - ] - - def forward(self, input_tensor: torch.Tensor, value): - # Our backend can't handle tensors without users, which input_tensor doesn't have - # when the full_like is converted to a full. Therefore involve it in the output. 
- return input_tensor + torch.full_like(input_tensor, value) - - def _test_full_tosa_MI_pipeline( - self, - module: torch.nn.Module, - example_data: Tuple, - test_data: Tuple | None = None, - ): - if test_data is None: - test_data = example_data - ( - ArmTester( - module, - example_inputs=example_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .to_edge_transform_and_lower() - .check_not(["executorch_exir_dialects_edge__ops_aten_full_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_full_tosa_BI_pipeline( - self, - module: torch.nn.Module, - test_data: Tuple, - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .to_edge_transform_and_lower() - .check_not(["executorch_exir_dialects_edge__ops_aten_full_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_full_tosa_ethos_pipeline( - self, compile_spec: list[CompileSpec], module: torch.nn.Module, test_data: Tuple - ): - tester = ( - ArmTester(module, example_inputs=test_data, compile_spec=compile_spec) - .quantize() - .export() - .to_edge_transform_and_lower() - .check_not(["executorch_exir_dialects_edge__ops_aten_full_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - def _test_full_tosa_u55_pipeline(self, module: torch.nn.Module, test_data: Tuple): - self._test_full_tosa_ethos_pipeline( - common.get_u55_compile_spec(), module, test_data - ) - - def _test_full_tosa_u85_pipeline(self, module: torch.nn.Module, test_data: Tuple): - self._test_full_tosa_ethos_pipeline( - common.get_u85_compile_spec(), module, test_data - ) - - def test_only_full_tosa_MI(self): - self._test_full_tosa_MI_pipeline(self.Full(), ()) - - def test_const_full_tosa_MI(self): - _input = torch.rand((2, 2, 3, 3)) * 10 - self._test_full_tosa_MI_pipeline(self.AddConstFull(), (_input,)) - - @parameterized.expand(FullLike.test_parameters) - def test_full_like_tosa_MI(self, test_tensor: Tuple): - self._test_full_tosa_MI_pipeline(self.FullLike(), test_tensor) - - @parameterized.expand(AddVariableFull.test_parameters) - def test_full_tosa_MI(self, test_tensor: Tuple): - self._test_full_tosa_MI_pipeline( - self.AddVariableFull(), example_data=test_tensor - ) - - @parameterized.expand(AddVariableFull.test_parameters) - def test_full_tosa_BI(self, test_tensor: Tuple): - self._test_full_tosa_BI_pipeline(self.AddVariableFull(), test_tensor) - - @parameterized.expand(FullLike.test_parameters) - def test_full_like_tosa_BI(self, test_tensor: Tuple): - self._test_full_tosa_BI_pipeline(self.FullLike(), test_tensor) - - @parameterized.expand(AddVariableFull.test_parameters) - @pytest.mark.corstone_fvp - def test_full_u55_BI(self, test_tensor: Tuple): - self._test_full_tosa_u55_pipeline( - self.AddVariableFull(), - test_tensor, - ) - - @parameterized.expand(AddVariableFull.test_parameters) - @pytest.mark.corstone_fvp - def test_full_u85_BI(self, test_tensor: Tuple): - self._test_full_tosa_u85_pipeline( - self.AddVariableFull(), - test_tensor, - ) - - def test_integer_value(self): - _input = torch.ones((2, 2)) - integer_fill_value = 1 - 
self._test_full_tosa_MI_pipeline( - self.AddVariableFull(), example_data=(_input, integer_fill_value) - ) - - # This fails since the fill value in the full tensor is set at compile time by the example data (1.). - # Test data tries to set it again at runtime (to 2.) but it doesn't do anything. - # In eager mode, the fill value can be set at runtime, causing the outputs to not match. - @unittest.expectedFailure - def test_set_value_at_runtime(self): - _input = torch.ones((2, 2)) - example_fill_value = 1.0 - test_fill_value = 2.0 - self._test_full_tosa_MI_pipeline( - self.AddVariableFull(), - example_data=(_input, example_fill_value), - test_data=(_input, test_fill_value), - ) +from executorch.backends.arm.test import common +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +input_t1 = Tuple[torch.Tensor, int] + +exir_op = "executorch_exir_dialects_edge__ops_aten_full_default" + + +class Full(torch.nn.Module): + # A single full op + def forward(self): + return torch.full((3, 3), 4.5) + + +class AddConstFull(torch.nn.Module): + # Input + a full with constant value. + def forward(self, x: torch.Tensor): + return torch.full((2, 2, 3, 3), 4.5, dtype=torch.float32) + x + + +class AddVariableFull(torch.nn.Module): + sizes: list[tuple[int, ...]] = [ + (5,), + (5, 5), + (5, 5, 5), + (1, 5, 5, 5), + ] + test_parameters = {} + for i, n in enumerate(sizes): + test_parameters[f"slice_randn_{i}"] = (torch.randn(n) * 10 - 5, 3.2) + + def forward(self, x: torch.Tensor, y): + # Input + a full with the shape from the input and a given value 'y'. + return x + torch.full(x.shape, y) + + +class FullLike(torch.nn.Module): + """Since full_like is replaced with full, we only need to test on reference model, not FVP.""" + + test_parameters = { + "full_like_value_3_2": lambda: (torch.randn(2, 2, 2, 2) * 50, 3.2), + "full_like_value_3": lambda: (torch.randn(2, 2, 2, 2) * 50, 3), + "full_like_value_3_2_int32": lambda: ( + (torch.randn(2, 2, 2, 2) * 50).to(torch.int32), + 3.2, + ), + "full_like_value_3_int32": lambda: ( + (torch.randn(2, 2, 2, 2) * 50).to(torch.int32), + 3, + ), + } + + def forward(self, input_tensor: torch.Tensor, value): + # Our backend can't handle tensors without users, which input_tensor doesn't have + # when the full_like is converted to a full. Therefore involve it in the output. 
+        return input_tensor + torch.full_like(input_tensor, value)
+
+
+def test_full_tosa_MI_only():
+    pipeline = TosaPipelineMI[input_t1](
+        Full(),
+        (),
+        aten_op=[],
+        exir_op=exir_op,
+    )
+    pipeline.run()
+
+
+def test_full_tosa_MI_const():
+    test_data = (torch.rand((2, 2, 3, 3)) * 10,)
+    pipeline = TosaPipelineMI[input_t1](
+        AddConstFull(),
+        test_data,
+        aten_op=[],
+        exir_op=exir_op,
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", FullLike.test_parameters)
+def test_full_like_tosa_MI(test_data: Tuple):
+    pipeline = TosaPipelineMI[input_t1](
+        FullLike(),
+        test_data(),
+        aten_op=[],
+        exir_op=exir_op,
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", AddVariableFull.test_parameters)
+def test_full_tosa_MI(test_data: Tuple):
+    pipeline = TosaPipelineMI[input_t1](
+        AddVariableFull(),
+        test_data,
+        aten_op=[],
+        exir_op=exir_op,
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", AddVariableFull.test_parameters)
+def test_full_tosa_BI(test_data: Tuple):
+    pipeline = TosaPipelineBI[input_t1](
+        AddVariableFull(),
+        test_data,
+        aten_op=[],
+        exir_op=exir_op,
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", FullLike.test_parameters)
+def test_full_like_tosa_BI(test_data: Tuple):
+    pipeline = TosaPipelineBI[input_t1](
+        FullLike(),
+        test_data(),
+        aten_op=[],
+        exir_op=exir_op,
+    )
+    pipeline.pop_stage("check.quant_nodes")
+    pipeline.run()
+
+
+@common.parametrize("test_data", AddVariableFull.test_parameters)
+@common.XfailIfNoCorstone320
+def test_full_u85_BI(test_data: Tuple):
+    pipeline = EthosU85PipelineBI[input_t1](
+        AddVariableFull(),
+        test_data,
+        aten_ops=[],
+        exir_ops=exir_op,
+        run_on_fvp=True,
+        use_to_edge_transform_and_lower=True,
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", AddVariableFull.test_parameters)
+@common.XfailIfNoCorstone300
+def test_full_u55_BI(test_data: Tuple):
+    pipeline = EthosU55PipelineBI[input_t1](
+        AddVariableFull(),
+        test_data,
+        aten_ops=[],
+        exir_ops=exir_op,
+        run_on_fvp=True,
+        use_to_edge_transform_and_lower=True,
+    )
+    pipeline.run()
+
+
+# This fails since full outputs int64 by default if 'fill_value' is integer, which our backend doesn't support.
+@pytest.mark.skip(
+    "This fails since full outputs int64 by default if 'fill_value' is integer, which our backend doesn't support."
+)
+def test_full_tosa_MI_integer_value():
+    test_data = (torch.ones((2, 2)), 1)
+    pipeline = TosaPipelineMI[input_t1](
+        AddVariableFull(),
+        test_data,
+        aten_op=[],
+        exir_op=exir_op,
+    )
+    pipeline.run()
+
+
+# This fails since the fill value in the full tensor is set at compile time by the example data (1.).
+# Test data tries to set it again at runtime (to 2.) but it doesn't do anything.
+# In eager mode, the fill value can be set at runtime, causing the outputs to not match.
+@pytest.mark.skip(
+    "This fails since the fill value in the full tensor is set at compile time by the example data (1.)."
+) +def test_full_tosa_MI_set_value_at_runtime(tosa_version: str): + test_data = (torch.ones((2, 2)), 1.0) + pipeline = TosaPipelineMI[input_t1]( + AddVariableFull(), + test_data, + aten_op=[], + exir_op=exir_op, + ) + pipeline.pop_stage("run_method_and_compare_outputs") + pipeline.add_stage( + pipeline.tester.run_method_and_compare_outputs, inputs=(torch.ones((2, 2)), 2.0) + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_ge.py b/backends/arm/test/ops/test_ge.py index 7bcd2c923a4..19c036be526 100644 --- a/backends/arm/test/ops/test_ge.py +++ b/backends/arm/test/ops/test_ge.py @@ -62,25 +62,25 @@ def get_inputs(self): op_ge_scalar_rank4_randn = GreaterEqual(torch.randn(3, 2, 2, 2), 0.3) test_data_tensor = { - "ge_tensor_rank1_ones": op_ge_tensor_rank1_ones, - "ge_tensor_rank2_rand": op_ge_tensor_rank2_rand, - "ge_tensor_rank3_randn": op_ge_tensor_rank3_randn, - "ge_tensor_rank4_randn": op_ge_tensor_rank4_randn, + "ge_tensor_rank1_ones": lambda: op_ge_tensor_rank1_ones, + "ge_tensor_rank2_rand": lambda: op_ge_tensor_rank2_rand, + "ge_tensor_rank3_randn": lambda: op_ge_tensor_rank3_randn, + "ge_tensor_rank4_randn": lambda: op_ge_tensor_rank4_randn, } test_data_scalar = { - "ge_scalar_rank1_ones": op_ge_scalar_rank1_ones, - "ge_scalar_rank2_rand": op_ge_scalar_rank2_rand, - "ge_scalar_rank3_randn": op_ge_scalar_rank3_randn, - "ge_scalar_rank4_randn": op_ge_scalar_rank4_randn, + "ge_scalar_rank1_ones": lambda: op_ge_scalar_rank1_ones, + "ge_scalar_rank2_rand": lambda: op_ge_scalar_rank2_rand, + "ge_scalar_rank3_randn": lambda: op_ge_scalar_rank3_randn, + "ge_scalar_rank4_randn": lambda: op_ge_scalar_rank4_randn, } @common.parametrize("test_module", test_data_tensor) def test_ge_tensor_tosa_MI(test_module): pipeline = TosaPipelineMI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), GreaterEqual.aten_op_tensor, GreaterEqual.exir_op, ) @@ -90,8 +90,8 @@ def test_ge_tensor_tosa_MI(test_module): @common.parametrize("test_module", test_data_scalar) def test_ge_scalar_tosa_MI(test_module): pipeline = TosaPipelineMI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), GreaterEqual.aten_op_scalar, GreaterEqual.exir_op, ) @@ -101,8 +101,8 @@ def test_ge_scalar_tosa_MI(test_module): @common.parametrize("test_module", test_data_tensor) def test_ge_tensor_tosa_BI(test_module): pipeline = TosaPipelineBI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), GreaterEqual.aten_op_tensor, GreaterEqual.exir_op, ) @@ -112,8 +112,8 @@ def test_ge_tensor_tosa_BI(test_module): @common.parametrize("test_module", test_data_scalar) def test_ge_scalar_tosa_BI(test_module): pipeline = TosaPipelineBI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), GreaterEqual.aten_op_tensor, GreaterEqual.exir_op, ) @@ -125,10 +125,11 @@ def test_ge_scalar_tosa_BI(test_module): def test_ge_tensor_u55_BI(test_module): # GREATER_EQUAL is not supported on U55. pipeline = OpNotSupportedPipeline[input_t]( - test_module, - test_module.get_inputs(), - "TOSA-0.80+BI+u55", + test_module(), + test_module().get_inputs(), {GreaterEqual.exir_op: 1}, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -138,11 +139,12 @@ def test_ge_tensor_u55_BI(test_module): def test_ge_scalar_u55_BI(test_module): # GREATER_EQUAL is not supported on U55. 
pipeline = OpNotSupportedPipeline[input_t]( - test_module, - test_module.get_inputs(), - "TOSA-0.80+BI+u55", + test_module(), + test_module().get_inputs(), {GreaterEqual.exir_op: 1}, n_expected_delegates=1, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -155,8 +157,8 @@ def test_ge_scalar_u55_BI(test_module): @common.XfailIfNoCorstone320 def test_ge_tensor_u85_BI(test_module): pipeline = EthosU85PipelineBI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), GreaterEqual.aten_op_tensor, GreaterEqual.exir_op, run_on_fvp=True, @@ -172,8 +174,8 @@ def test_ge_tensor_u85_BI(test_module): @common.XfailIfNoCorstone320 def test_ge_scalar_u85_BI(test_module): pipeline = EthosU85PipelineBI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), GreaterEqual.aten_op_tensor, GreaterEqual.exir_op, run_on_fvp=True, diff --git a/backends/arm/test/ops/test_gelu.py b/backends/arm/test/ops/test_gelu.py index fb1253fdb0c..6ac9b5dabf5 100644 --- a/backends/arm/test/ops/test_gelu.py +++ b/backends/arm/test/ops/test_gelu.py @@ -22,51 +22,51 @@ class Gelu(torch.nn.Module): exir_op = "executorch_exir_dialects_edge__ops_aten_gelu_default" test_data: dict[str, Tuple[str, input_t1]] = { - "zeros_none": ( + "zeros_none": lambda: ( "none", torch.zeros(1, 10, 10, 10), ), - "ones_none": ( + "ones_none": lambda: ( "none", torch.ones(10, 10, 10), ), - "rand_none": ( + "rand_none": lambda: ( "none", (torch.rand(10, 10) - 0.5), ), - "randn_pos_none": ( + "randn_pos_none": lambda: ( "none", (torch.randn(1, 4, 4, 4) + 10), ), - "randn_neg_none": ( + "randn_neg_none": lambda: ( "none", (torch.randn(1, 4, 4, 4) - 10), ), - "ramp_none": ( + "ramp_none": lambda: ( "none", torch.arange(-16, 16, 0.2), ), - "zeros_tanh": ( + "zeros_tanh": lambda: ( "tanh", torch.zeros(1, 10, 10, 10), ), - "ones_tanh": ( + "ones_tanh": lambda: ( "tanh", torch.ones(10, 10, 10), ), - "rand_tanh": ( + "rand_tanh": lambda: ( "tanh", (torch.rand(10, 10) - 0.5), ), - "randn_pos_tanh": ( + "randn_pos_tanh": lambda: ( "tanh", (torch.randn(1, 4, 4, 4) + 10), ), - "randn_neg_tanh": ( + "randn_neg_tanh": lambda: ( "tanh", (torch.randn(1, 4, 4, 4) - 10), ), - "ramp_tanh": ( + "ramp_tanh": lambda: ( "tanh", torch.arange(-16, 16, 0.2), ), @@ -82,10 +82,10 @@ def forward(self, x: torch.Tensor): @common.parametrize("test_data", Gelu.test_data) def test_gelu_tosa_MI(test_data: input_t1): - approximate = test_data[0] + approximate, test_data = test_data() TosaPipelineMI[input_t1]( Gelu(approximate), - (test_data[1],), + (test_data,), Gelu.aten_op, Gelu.exir_op, use_to_edge_transform_and_lower=False, @@ -94,32 +94,34 @@ def test_gelu_tosa_MI(test_data: input_t1): @common.parametrize("test_data", Gelu.test_data) def test_gelu_tosa_BI(test_data: input_t1): - approximate = test_data[0] + approximate, test_data = test_data() TosaPipelineBI[input_t1]( Gelu(approximate), - (test_data[1],), + (test_data,), Gelu.aten_op, Gelu.exir_op, ).run() @common.parametrize("test_data", Gelu.test_data) +@common.XfailIfNoCorstone300 def test_gelu_u55_BI(test_data: input_t1): - approximate = test_data[0] + approximate, test_data = test_data() EthosU55PipelineBI[input_t1]( Gelu(approximate), - (test_data[1],), + (test_data,), Gelu.aten_op, Gelu.exir_op, ).run() @common.parametrize("test_data", Gelu.test_data) +@common.XfailIfNoCorstone320 def test_gelu_u85_BI(test_data: input_t1): - approximate = test_data[0] + approximate, test_data = test_data() EthosU85PipelineBI[input_t1]( 
Gelu(approximate), - (test_data[1],), + (test_data,), Gelu.aten_op, Gelu.exir_op, ).run() diff --git a/backends/arm/test/ops/test_gt.py b/backends/arm/test/ops/test_gt.py index 15515958c85..0a1b97928fd 100644 --- a/backends/arm/test/ops/test_gt.py +++ b/backends/arm/test/ops/test_gt.py @@ -63,24 +63,27 @@ def get_inputs(self): op_gt_scalar_rank4_randn = Greater(torch.randn(3, 2, 2, 2), 0.3) test_data_tensor = { - "gt_tensor_rank1_ones": op_gt_tensor_rank1_ones, - "gt_tensor_rank2_rand": op_gt_tensor_rank2_rand, - "gt_tensor_rank3_randn": op_gt_tensor_rank3_randn, - "gt_tensor_rank4_randn": op_gt_tensor_rank4_randn, + "gt_tensor_rank1_ones": lambda: op_gt_tensor_rank1_ones, + "gt_tensor_rank2_rand": lambda: op_gt_tensor_rank2_rand, + "gt_tensor_rank3_randn": lambda: op_gt_tensor_rank3_randn, + "gt_tensor_rank4_randn": lambda: op_gt_tensor_rank4_randn, } test_data_scalar = { - "gt_scalar_rank1_ones": op_gt_scalar_rank1_ones, - "gt_scalar_rank2_rand": op_gt_scalar_rank2_rand, - "gt_scalar_rank3_randn": op_gt_scalar_rank3_randn, - "gt_scalar_rank4_randn": op_gt_scalar_rank4_randn, + "gt_scalar_rank1_ones": lambda: op_gt_scalar_rank1_ones, + "gt_scalar_rank2_rand": lambda: op_gt_scalar_rank2_rand, + "gt_scalar_rank3_randn": lambda: op_gt_scalar_rank3_randn, + "gt_scalar_rank4_randn": lambda: op_gt_scalar_rank4_randn, } @common.parametrize("test_module", test_data_tensor) def test_gt_tensor_tosa_MI(test_module): pipeline = TosaPipelineMI[input_t]( - test_module, test_module.get_inputs(), Greater.aten_op_tensor, Greater.exir_op + test_module(), + test_module().get_inputs(), + Greater.aten_op_tensor, + Greater.exir_op, ) pipeline.run() @@ -88,7 +91,10 @@ def test_gt_tensor_tosa_MI(test_module): @common.parametrize("test_module", test_data_scalar) def test_gt_scalar_tosa_MI(test_module): pipeline = TosaPipelineMI[input_t]( - test_module, test_module.get_inputs(), Greater.aten_op_scalar, Greater.exir_op + test_module(), + test_module().get_inputs(), + Greater.aten_op_scalar, + Greater.exir_op, ) pipeline.run() @@ -96,7 +102,10 @@ def test_gt_scalar_tosa_MI(test_module): @common.parametrize("test_module", test_data_tensor) def test_gt_tensor_tosa_BI(test_module): pipeline = TosaPipelineBI[input_t]( - test_module, test_module.get_inputs(), Greater.aten_op_tensor, Greater.exir_op + test_module(), + test_module().get_inputs(), + Greater.aten_op_tensor, + Greater.exir_op, ) pipeline.run() @@ -104,7 +113,10 @@ def test_gt_tensor_tosa_BI(test_module): @common.parametrize("test_module", test_data_scalar) def test_gt_scalar_tosa_BI(test_module): pipeline = TosaPipelineBI[input_t]( - test_module, test_module.get_inputs(), Greater.aten_op_tensor, Greater.exir_op + test_module(), + test_module().get_inputs(), + Greater.aten_op_tensor, + Greater.exir_op, ) pipeline.run() @@ -114,10 +126,11 @@ def test_gt_scalar_tosa_BI(test_module): def test_gt_tensor_u55_BI(test_module): # Greater is not supported on U55. pipeline = OpNotSupportedPipeline[input_t]( - test_module, - test_module.get_inputs(), - "TOSA-0.80+BI+u55", + test_module(), + test_module().get_inputs(), {Greater.exir_op: 1}, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -127,11 +140,12 @@ def test_gt_tensor_u55_BI(test_module): def test_gt_scalar_u55_BI(test_module): # Greater is not supported on U55. 
pipeline = OpNotSupportedPipeline[input_t]( - test_module, - test_module.get_inputs(), - "TOSA-0.80+BI+u55", + test_module(), + test_module().get_inputs(), {Greater.exir_op: 1}, n_expected_delegates=1, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -146,8 +160,8 @@ def test_gt_scalar_u55_BI(test_module): @common.XfailIfNoCorstone320 def test_gt_tensor_u85_BI(test_module): pipeline = EthosU85PipelineBI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), Greater.aten_op_tensor, Greater.exir_op, run_on_fvp=True, @@ -165,8 +179,8 @@ def test_gt_tensor_u85_BI(test_module): @common.XfailIfNoCorstone320 def test_gt_scalar_u85_BI(test_module): pipeline = EthosU85PipelineBI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), Greater.aten_op_tensor, Greater.exir_op, run_on_fvp=True, diff --git a/backends/arm/test/ops/test_hardsigmoid.py b/backends/arm/test/ops/test_hardsigmoid.py index f73a995b120..399c6088e89 100644 --- a/backends/arm/test/ops/test_hardsigmoid.py +++ b/backends/arm/test/ops/test_hardsigmoid.py @@ -1,128 +1,89 @@ # Copyright 2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest from typing import Tuple -import pytest import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized +from executorch.backends.arm.test import common +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) +aten_op = "torch.ops.aten.hardsigmoid.default" +input_t1 = Tuple[torch.Tensor] # Input x -test_data_suite = [ +test_data_suite = { # (test_name, test_data) - ("zeros", torch.zeros(1, 10, 10, 10)), - ("ones", torch.ones(10, 10, 10)), - ("rand", torch.rand(10, 10) - 0.5), - ("randn_pos", torch.randn(10) + 10), - ("randn_neg", torch.randn(10) - 10), - ("ramp", torch.arange(-16, 16, 0.2)), -] - - -class TestHardsigmoid(unittest.TestCase): - class Hardsigmoid(torch.nn.Module): - def __init__(self): - super().__init__() - self.hardsigmoid = torch.nn.Hardsigmoid() - - def forward(self, x): - return self.hardsigmoid(x) - - def _test_hardsigmoid_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check(["torch.ops.aten.hardsigmoid.default"]) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge_transform_and_lower() - .check_not(["executorch_exir_dialects_edge__ops_aten_clamp_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_hardsigmoid_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check(["torch.ops.aten.hardsigmoid.default"]) - .check(["torch.ops.quantized_decomposed"]) - .to_edge_transform_and_lower() - .check_not(["executorch_exir_dialects_edge__ops_aten_clamp_default"]) - 
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_hardsigmoid_tosa_ethos_BI_pipeline( - self, - compile_spec: list[CompileSpec], - module: torch.nn.Module, - test_data: Tuple[torch.tensor], - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.hardsigmoid.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge_transform_and_lower() - .check_not(["executorch_exir_dialects_edge__ops_aten_clamp_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - @parameterized.expand(test_data_suite) - def test_hardsigmoid_tosa_MI( - self, - test_name: str, - test_data: torch.Tensor, - ): - self._test_hardsigmoid_tosa_MI_pipeline(self.Hardsigmoid(), (test_data,)) - - @parameterized.expand(test_data_suite) - def test_hardsigmoid_tosa_BI(self, test_name: str, test_data: torch.Tensor): - self._test_hardsigmoid_tosa_BI_pipeline(self.Hardsigmoid(), (test_data,)) - - @parameterized.expand(test_data_suite) - @pytest.mark.corstone_fvp - def test_hardsigmoid_tosa_u55_BI(self, test_name: str, test_data: torch.Tensor): - self._test_hardsigmoid_tosa_ethos_BI_pipeline( - common.get_u55_compile_spec(), self.Hardsigmoid(), (test_data,) - ) - - @parameterized.expand(test_data_suite) - @pytest.mark.corstone_fvp - def test_hardsigmoid_tosa_u85_BI(self, test_name: str, test_data: torch.Tensor): - self._test_hardsigmoid_tosa_ethos_BI_pipeline( - common.get_u85_compile_spec(), self.Hardsigmoid(), (test_data,) - ) + "zeros": lambda: torch.zeros(1, 10, 10, 10), + "ones": lambda: torch.ones(10, 10, 10), + "rand": lambda: torch.rand(10, 10) - 0.5, + "randn_pos": lambda: torch.randn(10) + 10, + "randn_neg": lambda: torch.randn(10) - 10, + "ramp": lambda: torch.arange(-16, 16, 0.2), +} + + +class Hardsigmoid(torch.nn.Module): + def __init__(self): + super().__init__() + self.hardsigmoid = torch.nn.Hardsigmoid() + + def forward(self, x): + return self.hardsigmoid(x) + + +@common.parametrize("test_data", test_data_suite) +def test_hardsigmoid_tosa_MI(test_data: torch.Tensor): + pipeline = TosaPipelineMI[input_t1]( + Hardsigmoid(), + (test_data(),), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_hardsigmoid_tosa_BI(test_data: torch.Tensor): + pipeline = TosaPipelineBI[input_t1]( + Hardsigmoid(), + (test_data(),), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone300 +def test_hardsigmoid_u55_BI(test_data: torch.Tensor): + pipeline = EthosU55PipelineBI[input_t1]( + Hardsigmoid(), + (test_data(),), + aten_op, + exir_ops=[], + run_on_fvp=True, + use_to_edge_transform_and_lower=True, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone320 +def test_hardsigmoid_u85_BI(test_data: torch.Tensor): + pipeline = EthosU85PipelineBI[input_t1]( + Hardsigmoid(), + (test_data(),), + aten_op, + exir_ops=[], + run_on_fvp=True, + use_to_edge_transform_and_lower=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_hardswish.py b/backends/arm/test/ops/test_hardswish.py index 81aba540e3f..bd61346e3db 100644 --- a/backends/arm/test/ops/test_hardswish.py 
+++ b/backends/arm/test/ops/test_hardswish.py @@ -1,128 +1,79 @@ # Copyright 2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest from typing import Tuple -import pytest import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized +from executorch.backends.arm.test import common +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) +aten_op = "torch.ops.aten.hardswish.default" +exir_op = "executorch_exir_dialects_edge__ops_aten_clamp_default" -test_data_suite = [ +input_t1 = Tuple[torch.Tensor] + +test_data_suite = { # (test_name, test_data) - ("zeros", torch.zeros(1, 10, 10, 10)), - ("ones", torch.ones(10, 10, 10)), - ("rand", torch.rand(10, 10) - 0.5), - ("randn_pos", torch.randn(10) + 10), - ("randn_neg", torch.randn(10) - 10), - ("ramp", torch.arange(-16, 16, 0.2)), -] - - -class TestHardswish(unittest.TestCase): - class Hardswish(torch.nn.Module): - def __init__(self): - super().__init__() - self.hardswish = torch.nn.Hardswish() - - def forward(self, x): - return self.hardswish(x) - - def _test_hardswish_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check(["torch.ops.aten.hardswish.default"]) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge_transform_and_lower() - .check_not(["executorch_exir_dialects_edge__ops_aten_clamp_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_hardswish_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check(["torch.ops.aten.hardswish.default"]) - .check(["torch.ops.quantized_decomposed"]) - .to_edge_transform_and_lower() - .check_not(["executorch_exir_dialects_edge__ops_aten_clamp_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_hardswish_tosa_ethos_BI_pipeline( - self, - compile_spec: list[CompileSpec], - module: torch.nn.Module, - test_data: Tuple[torch.tensor], - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.hardswish.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge_transform_and_lower() - .check_not(["executorch_exir_dialects_edge__ops_aten_clamp_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - @parameterized.expand(test_data_suite) - def test_hardswish_tosa_MI( - self, - test_name: str, - test_data: torch.Tensor, - ): - self._test_hardswish_tosa_MI_pipeline(self.Hardswish(), (test_data,)) - - 
@parameterized.expand(test_data_suite) - def test_hardswish_tosa_BI(self, test_name: str, test_data: torch.Tensor): - self._test_hardswish_tosa_BI_pipeline(self.Hardswish(), (test_data,)) - - @parameterized.expand(test_data_suite) - @pytest.mark.corstone_fvp - def test_hardswish_tosa_u55_BI(self, test_name: str, test_data: torch.Tensor): - self._test_hardswish_tosa_ethos_BI_pipeline( - common.get_u55_compile_spec(), self.Hardswish(), (test_data,) - ) - - @parameterized.expand(test_data_suite) - @pytest.mark.corstone_fvp - def test_hardswish_tosa_u85_BI(self, test_name: str, test_data: torch.Tensor): - self._test_hardswish_tosa_ethos_BI_pipeline( - common.get_u85_compile_spec(), self.Hardswish(), (test_data,) - ) + "zeros": lambda: (torch.zeros(1, 10, 10, 10)), + "ones": lambda: (torch.ones(10, 10, 10)), + "rand": lambda: (torch.rand(10, 10) - 0.5), + "randn_pos": lambda: (torch.randn(10) + 10), + "randn_neg": lambda: (torch.randn(10) - 10), + "ramp": lambda: (torch.arange(-16, 16, 0.2)), +} + + +class Hardswish(torch.nn.Module): + def __init__(self): + super().__init__() + self.hardswish = torch.nn.Hardswish() + + def forward(self, x): + return self.hardswish(x) + + +@common.parametrize("test_data", test_data_suite) +def test_hardswish_tosa_MI(test_data): + pipeline = TosaPipelineMI[input_t1](Hardswish(), (test_data(),), aten_op, exir_op) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_hardswish_tosa_BI(test_data): + pipeline = TosaPipelineBI[input_t1](Hardswish(), (test_data(),), aten_op, exir_op) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone300 +def test_hardswish_u55_BI(test_data): + EthosU55PipelineBI[input_t1]( + Hardswish(), + (test_data(),), + aten_op, + exir_op, + run_on_fvp=True, + use_to_edge_transform_and_lower=True, + ).run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone320 +def test_hardswish_u85_BI(test_data): + EthosU85PipelineBI[input_t1]( + Hardswish(), + (test_data(),), + aten_op, + exir_op, + run_on_fvp=True, + use_to_edge_transform_and_lower=True, + ).run() diff --git a/backends/arm/test/ops/test_hardtanh.py b/backends/arm/test/ops/test_hardtanh.py index 46b44078785..f1a50467df7 100644 --- a/backends/arm/test/ops/test_hardtanh.py +++ b/backends/arm/test/ops/test_hardtanh.py @@ -1,143 +1,91 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. -# Copyright 2024-2025 Arm Limited and/or its affiliates. # All rights reserved. +# Copyright 2024-2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
-import unittest from typing import Tuple -import pytest - import torch -from executorch.backends.arm.quantizer import ( - EthosUQuantizer, - get_symmetric_quantization_config, - TOSAQuantizer, +from executorch.backends.arm.test import common +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, ) -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester - -from executorch.backends.arm.tosa_specification import TosaSpecification -from executorch.backends.xnnpack.test.tester.tester import Quantize -from parameterized import parameterized - -test_data_suite = [ +test_data_suite = { # (test_name, test_data) - ("zeros", torch.zeros(1, 10, 10, 10)), - ("ones", torch.ones(10, 10, 10)), - ("rand", torch.rand(10, 10) - 0.5), - ("randn_pos", torch.randn(10) + 10), - ("randn_neg", torch.randn(10) - 10), - ("ramp", torch.arange(-16, 16, 0.2)), -] - - -class TestHardTanh(unittest.TestCase): - """Tests HardTanh Operator.""" - - class HardTanh(torch.nn.Module): - - def __init__(self): - super().__init__() - - self.hardTanh = torch.nn.Hardtanh() - - def forward(self, x): - return self.hardTanh(x) - - def _test_hardtanh_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check(["torch.ops.aten.hardtanh.default"]) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_hardtanh_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_hardtanh_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - tosa_spec = TosaSpecification.create_from_string("TOSA-0.80+BI") - compile_spec = common.get_tosa_compile_spec(tosa_spec) - quantizer = TOSAQuantizer(tosa_spec).set_io(get_symmetric_quantization_config()) - ( - ArmTester(module, example_inputs=test_data, compile_spec=compile_spec) - .quantize(Quantize(quantizer, get_symmetric_quantization_config())) - .export() - .check_count({"torch.ops.aten.hardtanh.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_hardtanh_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_hardtanh_tosa_ethosu_BI_pipeline( - self, compile_spec, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - quantizer = EthosUQuantizer(compile_spec).set_io( - get_symmetric_quantization_config() - ) - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize(Quantize(quantizer, get_symmetric_quantization_config())) - .export() - .check_count({"torch.ops.aten.hardtanh.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_hardtanh_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - @parameterized.expand(test_data_suite) - def 
test_hardtanh_tosa_MI( - self, - test_name: str, - test_data: torch.Tensor, - ): - self._test_hardtanh_tosa_MI_pipeline(self.HardTanh(), (test_data,)) - - @parameterized.expand(test_data_suite) - def test_hardtanh_tosa_BI(self, test_name: str, test_data: torch.Tensor): - self._test_hardtanh_tosa_BI_pipeline(self.HardTanh(), (test_data,)) - - @parameterized.expand(test_data_suite) - @pytest.mark.corstone_fvp - def test_hardtanh_tosa_u55_BI(self, test_name: str, test_data: torch.Tensor): - self._test_hardtanh_tosa_ethosu_BI_pipeline( - common.get_u55_compile_spec(), self.HardTanh(), (test_data,) - ) - - @parameterized.expand(test_data_suite) - @pytest.mark.corstone_fvp - def test_hardtanh_tosa_u85_BI(self, test_name: str, test_data: torch.Tensor): - self._test_hardtanh_tosa_ethosu_BI_pipeline( - common.get_u85_compile_spec(), self.HardTanh(), (test_data,) - ) + "zeros": lambda: (torch.zeros(1, 10, 10, 10)), + "ones": lambda: (torch.ones(10, 10, 10)), + "rand": lambda: (torch.rand(10, 10) - 0.5), + "randn_pos": lambda: (torch.randn(10) + 10), + "randn_neg": lambda: (torch.randn(10) - 10), + "ramp": lambda: (torch.arange(-16, 16, 0.2)), +} + +aten_op = "torch.ops.aten.hardtanh.default" +exir_op = "executorch_exir_dialects_edge__ops_aten_hardtanh_default" + +input_t = Tuple[torch.Tensor] + + +class HardTanh(torch.nn.Module): + + def __init__(self): + super().__init__() + + self.hardTanh = torch.nn.Hardtanh() + + def forward(self, x): + return self.hardTanh(x) + + +@common.parametrize("test_data", test_data_suite) +def test_hardtanh_tosa_MI(test_data: torch.Tensor): + pipeline = TosaPipelineMI[input_t](HardTanh(), (test_data(),), aten_op, exir_op) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_hardtanh_tosa_BI(test_data: torch.Tensor): + pipeline = TosaPipelineBI[input_t]( + HardTanh(), + (test_data(),), + aten_op, + exir_op, + symmetric_io_quantization=True, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone300 +def test_hardtanh_u55_BI(test_data: torch.Tensor): + pipeline = EthosU55PipelineBI[input_t]( + HardTanh(), + (test_data(),), + aten_op, + exir_op, + run_on_fvp=True, + symmetric_io_quantization=True, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone320 +def test_hardtanh_u85_BI(test_data: torch.Tensor): + pipeline = EthosU85PipelineBI[input_t]( + HardTanh(), + (test_data(),), + aten_op, + exir_op, + run_on_fvp=True, + symmetric_io_quantization=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_layer_norm.py b/backends/arm/test/ops/test_layer_norm.py index 7ed181711a1..d2d9aa0bc14 100644 --- a/backends/arm/test/ops/test_layer_norm.py +++ b/backends/arm/test/ops/test_layer_norm.py @@ -42,18 +42,21 @@ def forward(self, x): input_t = tuple[torch.Tensor] test_data_suite = { - "randn_last_dim": ((torch.randn(1, 5, 5, 5),), LayerNorm([5])), - "rand_last_two_dims": ((torch.rand(1, 5, 5, 5),), LayerNorm([5, 5])), - "rand_last_two_dims_not_elementwise_affine": ( + "randn_last_dim": lambda: ((torch.randn(1, 5, 5, 5),), LayerNorm([5])), + "rand_last_two_dims": lambda: ((torch.rand(1, 5, 5, 5),), LayerNorm([5, 5])), + "rand_last_two_dims_not_elementwise_affine": lambda: ( (torch.rand(1, 5, 5, 5),), LayerNorm([5, 5], 1e-5, False), ), - "rand_last_two_dims_not_elementwise_affine_no_bias": ( + "rand_last_two_dims_not_elementwise_affine_no_bias": lambda: ( (torch.rand(1, 5, 5, 5),), LayerNorm([5, 5], 1e-5, False, False), ), - "randn_last_three_dims": 
((torch.randn(1, 15, 10, 5),), LayerNorm([15, 10, 5])), - "randn_last_three_dims_no_bias": ( + "randn_last_three_dims": lambda: ( + (torch.randn(1, 15, 10, 5),), + LayerNorm([15, 10, 5]), + ), + "randn_last_three_dims_no_bias": lambda: ( (torch.randn(1, 15, 10, 5),), LayerNorm([15, 10, 5], 1e-2, False, False), ), @@ -62,9 +65,10 @@ def forward(self, x): @common.parametrize("test_data", test_data_suite) def test_native_layer_norm_tosa_MI(test_data): + test_data, model = test_data() pipeline = TosaPipelineMI[input_t]( - test_data[1], - test_data[0], + model, + test_data, "torch.ops.aten.layer_norm.default", ) pipeline.run() @@ -72,9 +76,10 @@ def test_native_layer_norm_tosa_MI(test_data): @common.parametrize("test_data", test_data_suite) def test_native_layer_norm_tosa_BI(test_data): + test_data, model = test_data() pipeline = TosaPipelineBI[input_t]( - test_data[1], - test_data[0], + model, + test_data, "torch.ops.aten.sub.Tensor", # Just check for sub op included in the layernorm decomposition ) pipeline.change_args("run_method_and_compare_outputs", qtol=1) @@ -84,9 +89,10 @@ def test_native_layer_norm_tosa_BI(test_data): @common.parametrize("test_data", test_data_suite) @common.XfailIfNoCorstone300 def test_native_layer_norm_u55_BI(test_data): + test_data, model = test_data() pipeline = EthosU55PipelineBI[input_t]( - test_data[1], - test_data[0], + model, + test_data, "torch.ops.aten.sub.Tensor", # Just check for sub op included in the layernorm decomposition run_on_fvp=True, ) @@ -97,9 +103,10 @@ def test_native_layer_norm_u55_BI(test_data): @common.parametrize("test_data", test_data_suite) @common.XfailIfNoCorstone320 def test_native_layer_norm_u85_BI(test_data): + test_data, model = test_data() pipeline = EthosU85PipelineBI[input_t]( - test_data[1], - test_data[0], + model, + test_data, "torch.ops.aten.sub.Tensor", # Just check for sub op included in the layernorm decomposition run_on_fvp=True, ) diff --git a/backends/arm/test/ops/test_le.py b/backends/arm/test/ops/test_le.py index 7e243ead620..217e409c6f5 100644 --- a/backends/arm/test/ops/test_le.py +++ b/backends/arm/test/ops/test_le.py @@ -5,7 +5,6 @@ from typing import Tuple -import pytest import torch from executorch.backends.arm.test import common @@ -57,63 +56,38 @@ def get_inputs(self): ) test_data_common = { - "le_rank1_ones": op_le_rank1_ones, - "le_rank2_rand": op_le_rank2_rand, - "le_rank3_randn": op_le_rank3_randn, - "le_rank4_randn": op_le_rank4_randn, + "le_rank1_ones": lambda: op_le_rank1_ones, + "le_rank2_rand": lambda: op_le_rank2_rand, + "le_rank3_randn": lambda: op_le_rank3_randn, + "le_rank4_randn": lambda: op_le_rank4_randn, } @common.parametrize("test_module", test_data_common) -def test_le_tosa_MI(test_module): +def test_le_tensor_tosa_MI(test_module): pipeline = TosaPipelineMI[input_t]( - test_module, test_module.get_inputs(), aten_op, exir_op + test_module(), test_module().get_inputs(), aten_op, exir_op ) pipeline.run() @common.parametrize("test_module", test_data_common) -def test_le_tosa_BI(test_module): +def test_le_tensor_tosa_BI(test_module): pipeline = TosaPipelineBI[input_t]( - test_module, test_module.get_inputs(), aten_op, exir_op + test_module(), test_module().get_inputs(), aten_op, exir_op ) pipeline.run() @common.parametrize("test_module", test_data_common) -def test_le_u55_BI(test_module): +def test_le_tensor_u55_BI_not_delegated(test_module): # GREATER_EQUAL is not supported on U55. LE uses the GREATER_EQUAL Tosa operator. 
pipeline = OpNotSupportedPipeline[input_t]( - test_module, - test_module.get_inputs(), - "TOSA-0.80+BI+u55", - {exir_op: 1}, - ) - pipeline.run() - - -@common.parametrize("test_module", test_data_common) -def test_le_u85_BI(test_module): - pipeline = EthosU85PipelineBI[input_t]( - test_module, - test_module.get_inputs(), - aten_op, - exir_op, - run_on_fvp=False, - use_to_edge_transform_and_lower=True, - ) - pipeline.run() - - -@common.parametrize("test_module", test_data_common) -@pytest.mark.skip(reason="The same as test_le_u55_BI") -def test_le_u55_BI_on_fvp(test_module): - # GREATER_EQUAL is not supported on U55. LE uses the GREATER_EQUAL Tosa operator. - pipeline = OpNotSupportedPipeline[input_t]( - test_module, - test_module.get_inputs(), - "TOSA-0.80+BI+u55", + test_module(), + test_module().get_inputs(), {exir_op: 1}, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -123,11 +97,11 @@ def test_le_u55_BI_on_fvp(test_module): test_data_common, xfails={"le_rank4_randn": "4D fails because boolean Tensors can't be subtracted"}, ) -@common.SkipIfNoCorstone320 -def test_le_u85_BI_on_fvp(test_module): +@common.XfailIfNoCorstone320 +def test_le_tensor_u85_BI(test_module): pipeline = EthosU85PipelineBI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), aten_op, exir_op, run_on_fvp=True, diff --git a/backends/arm/test/ops/test_leaky_relu.py b/backends/arm/test/ops/test_leaky_relu.py index b9f0c3a8d1a..a83c2812bf0 100644 --- a/backends/arm/test/ops/test_leaky_relu.py +++ b/backends/arm/test/ops/test_leaky_relu.py @@ -28,19 +28,22 @@ def forward(self, x: torch.Tensor): return self.activation(x) test_data: dict[str, input_t1] = { - "zeros": ((torch.zeros(1, 1, 5, 5),), 0.01), - "ones": ((torch.ones(1, 32, 112, 112),), 0.01), - "rand": ((torch.rand(1, 96, 56, 56),), 0.2), - "3Dtensor": ((torch.rand(5, 5, 5),), 0.001), - "negative_slope": ((torch.rand(1, 16, 128, 128),), -0.002), + "zeros": lambda: ((torch.zeros(1, 1, 5, 5),), 0.01), + "ones": lambda: ((torch.ones(1, 32, 112, 112),), 0.01), + "rand": lambda: ((torch.rand(1, 96, 56, 56),), 0.2), + "3Dtensor": lambda: ((torch.rand(5, 5, 5),), 0.001), + "negative_slope": lambda: ((torch.rand(1, 16, 128, 128),), -0.002), } @common.parametrize("test_data", LeakyReLU.test_data) def test_leaky_relu_tosa_MI(test_data): - data, slope = test_data + data, slope = test_data() pipeline = TosaPipelineMI[input_t1]( - LeakyReLU(slope), data, [], use_to_edge_transform_and_lower=True + LeakyReLU(slope), + data, + [], + use_to_edge_transform_and_lower=True, ) pipeline.add_stage_after( "to_edge_transform_and_lower", pipeline.tester.check_not, [exir_op] @@ -50,9 +53,12 @@ def test_leaky_relu_tosa_MI(test_data): @common.parametrize("test_data", LeakyReLU.test_data) def test_leaky_relu_tosa_BI(test_data): - data, slope = test_data + data, slope = test_data() pipeline = TosaPipelineBI[input_t1]( - LeakyReLU(slope), data, [], use_to_edge_transform_and_lower=True + LeakyReLU(slope), + data, + [], + use_to_edge_transform_and_lower=True, ) pipeline.add_stage_after("quantize", pipeline.tester.check_not, [aten_op]) pipeline.run() @@ -61,7 +67,7 @@ def test_leaky_relu_tosa_BI(test_data): @common.parametrize("test_data", LeakyReLU.test_data) @common.XfailIfNoCorstone300 def test_leaky_relu_u55_BI(test_data): - data, slope = test_data + data, slope = test_data() pipeline = EthosU55PipelineBI[input_t1]( LeakyReLU(slope), data, @@ -76,7 +82,7 @@ def test_leaky_relu_u55_BI(test_data): @common.parametrize("test_data", 
LeakyReLU.test_data) @common.XfailIfNoCorstone320 def test_leaky_relu_u85_BI(test_data): - data, slope = test_data + data, slope = test_data() pipeline = EthosU85PipelineBI[input_t1]( LeakyReLU(slope), data, diff --git a/backends/arm/test/ops/test_linear.py b/backends/arm/test/ops/test_linear.py index 9a289909bae..56d33097999 100644 --- a/backends/arm/test/ops/test_linear.py +++ b/backends/arm/test/ops/test_linear.py @@ -1,271 +1,199 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. -# Copyright 2024-2025 Arm Limited and/or its affiliates. # All rights reserved. +# Copyright 2024-2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest from typing import Tuple import pytest import torch -from executorch.backends.arm.test import common, conftest +from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) +aten_op = "torch.ops.aten.linear.default" -test_data_suite_rank1 = [ +input_t1 = Tuple[torch.Tensor] + +test_data_suite_rank1 = { # (test_name, test_data, out_features, has_bias) - ( - "model_linear_rank1_zeros", + "model_linear_rank1_zeros": lambda: ( torch.zeros(10), 15, True, ), - ( - "model_linear_rank1_ones", + "model_linear_rank1_ones": lambda: ( torch.ones(10), 15, False, ), - ( - "model_linear_rank1_negative_ones", + "model_linear_rank1_negative_ones": lambda: ( torch.ones(10) * (-1), 20, True, ), - ( - "model_linear_rank1_rand", + "model_linear_rank1_rand": lambda: ( torch.rand(10), 10, True, ), - ( - "model_linear_rank1_negative_large_rand", + "model_linear_rank1_negative_large_rand": lambda: ( torch.rand(10) * (-100), 30, False, ), - ( - "model_linear_rank1_large_randn", + "model_linear_rank1_large_randn": lambda: ( torch.randn(15) * 100, 20, True, ), -] +} -test_data_suite_rank4 = [ +test_data_suite_rank4 = { # (test_name, test_data, out_features, has_bias) - ( - "model_linear_rank4_zeros", + "model_linear_rank4_zeros": lambda: ( torch.zeros(5, 10, 25, 20), 30, True, ), - ( - "model_linear_rank4_ones", + "model_linear_rank4_ones": lambda: ( torch.ones(5, 10, 25, 20), 30, False, ), - ( - "model_linear_rank4_negative_ones", + "model_linear_rank4_negative_ones": lambda: ( torch.ones(5, 10, 25, 20) * (-1), 30, True, ), - ( - "model_linear_rank4_rand", + "model_linear_rank4_rand": lambda: ( torch.rand(5, 10, 25, 20), 30, False, ), - ( - "model_linear_rank4_negative_large_rand", + "model_linear_rank4_negative_large_rand": lambda: ( torch.rand(5, 10, 25, 20) * (-100), 30, True, ), - ( - "model_linear_rank4_large_randn", + "model_linear_rank4_large_randn": lambda: ( torch.randn(5, 10, 25, 20) * 100, 30, False, ), -] - - -class TestLinear(unittest.TestCase): - """tests the linear operation y = Ax + b""" - - class Linear(torch.nn.Module): - def __init__( - self, - in_features: int, - out_features: int = 3, - bias: bool = True, - ): - super().__init__() - self.fc = torch.nn.Linear( - in_features=in_features, - out_features=out_features, - bias=bias, - ) - - def forward(self, x): - return self.fc(x) +} - def _test_linear_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - tester = ( - ArmTester( - module, - 
example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec( - "TOSA-0.80+MI", - ), - ) - .export() - .check_count({"torch.ops.aten.linear.default": 1}) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge_transform_and_lower() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - if conftest.is_option_enabled("tosa_ref_model"): - tester.run_method_and_compare_outputs(inputs=test_data) - def _test_linear_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec( - "TOSA-0.80+BI", - ), - ) - .quantize() - .export() - .check_count({"torch.ops.aten.linear.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge_transform_and_lower() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - if conftest.is_option_enabled("tosa_ref_model"): - tester.run_method_and_compare_outputs(inputs=test_data, qtol=1) - - def _test_linear_tosa_ethosu_BI_pipeline( +class Linear(torch.nn.Module): + def __init__( self, - module: torch.nn.Module, - compile_spec: CompileSpec, - test_data: Tuple[torch.Tensor], - ) -> ArmTester: - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.linear.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge_transform_and_lower() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - # TODO: Add FVP testing support. - return tester - - @parameterized.expand(test_data_suite_rank1 + test_data_suite_rank4) - @pytest.mark.tosa_ref_model - def test_linear_tosa_MI( - self, - test_name: str, - test_data: torch.Tensor, - out_features: int, - has_bias: bool, + in_features: int, + out_features: int = 3, + bias: bool = True, ): - in_features = test_data.shape[-1] - test_data = (test_data,) - self._test_linear_tosa_MI_pipeline( - self.Linear( - in_features=in_features, - out_features=out_features, - bias=has_bias, - ), - test_data, + super().__init__() + self.fc = torch.nn.Linear( + in_features=in_features, + out_features=out_features, + bias=bias, ) - @parameterized.expand(test_data_suite_rank1 + test_data_suite_rank4) - @pytest.mark.tosa_ref_model - def test_linear_tosa_BI( - self, - test_name: str, - test_data: torch.Tensor, - out_features: int, - has_bias: bool, - ): - in_features = test_data.shape[-1] - test_data = (test_data,) - self._test_linear_tosa_BI_pipeline( - self.Linear( - in_features=in_features, out_features=out_features, bias=has_bias - ), - test_data, - ) - - @parameterized.expand(test_data_suite_rank1) - @pytest.mark.corstone_fvp - def test_linear_tosa_u55_BI( - self, - test_name: str, - test_data: torch.Tensor, - out_features: int, - has_bias: bool, - ): - in_features = test_data.shape[-1] - test_data = (test_data,) - tester = self._test_linear_tosa_ethosu_BI_pipeline( - self.Linear( - in_features=in_features, - out_features=out_features, - bias=has_bias, - ), - common.get_u55_compile_spec(), - test_data, - ) - - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - @parameterized.expand(test_data_suite_rank1 + test_data_suite_rank4) - @pytest.mark.corstone_fvp - def test_linear_tosa_u85_BI( - self, - test_name: str, - test_data: torch.Tensor, - out_features: int, - has_bias: bool, - ): - in_features 
= test_data.shape[-1] - test_data = (test_data,) - self._test_linear_tosa_ethosu_BI_pipeline( - self.Linear( - in_features=in_features, - out_features=out_features, - bias=has_bias, - ), - common.get_u85_compile_spec(), - test_data, - ) + def forward(self, x): + return self.fc(x) + + +@common.parametrize("test_data", test_data_suite_rank1 | test_data_suite_rank4) +def test_linear_tosa_MI(test_data: torch.Tensor): + test_data, out_features, has_bias = test_data() + in_features = test_data.shape[-1] + pipeline = TosaPipelineMI[input_t1]( + Linear( + in_features=in_features, + out_features=out_features, + bias=has_bias, + ), + (test_data,), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@pytest.mark.flaky(reruns=5) # TODO: Investigate flakyness. +@common.parametrize("test_data", test_data_suite_rank1 | test_data_suite_rank4) +def test_linear_tosa_BI(test_data: torch.Tensor): + test_data, out_features, has_bias = test_data() + in_features = test_data.shape[-1] + pipeline = TosaPipelineBI[input_t1]( + Linear( + in_features=in_features, + out_features=out_features, + bias=has_bias, + ), + (test_data,), + aten_op, + exir_op=[], + use_to_edge_transform_and_lower=True, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite_rank1) +@common.XfailIfNoCorstone300 +def test_linear_u55_BI(test_data: torch.Tensor): + test_data, out_features, has_bias = test_data() + in_features = test_data.shape[-1] + EthosU55PipelineBI[input_t1]( + Linear( + in_features=in_features, + out_features=out_features, + bias=has_bias, + ), + (test_data,), + aten_op, + exir_ops=[], + run_on_fvp=True, + use_to_edge_transform_and_lower=True, + ).run() + + +x_fail = { + "model_linear_rank4_zeros": "AssertionError: Output 0 does not match reference output.", + "model_linear_rank4_ones": "AssertionError: Output 0 does not match reference output.", + "model_linear_rank4_negative_ones": "AssertionError: Output 0 does not match reference output.", + "model_linear_rank4_rand": "AssertionError: Output 0 does not match reference output.", + "model_linear_rank4_negative_large_rand": "AssertionError: Output 0 does not match reference output.", + "model_linear_rank4_large_randn": "AssertionError: Output 0 does not match reference output.", +} + + +@common.parametrize( + "test_data", + test_data_suite_rank1 | test_data_suite_rank4, + x_fail, +) +@common.XfailIfNoCorstone320 +def test_linear_u85_BI(test_data: torch.Tensor): + test_data, out_features, has_bias = test_data() + in_features = test_data.shape[-1] + EthosU85PipelineBI[input_t1]( + Linear( + in_features=in_features, + out_features=out_features, + bias=has_bias, + ), + (test_data,), + aten_op, + exir_ops=[], + run_on_fvp=True, + use_to_edge_transform_and_lower=True, + ).run() diff --git a/backends/arm/test/ops/test_log.py b/backends/arm/test/ops/test_log.py index 0226a62328b..0ca4510681d 100644 --- a/backends/arm/test/ops/test_log.py +++ b/backends/arm/test/ops/test_log.py @@ -1,127 +1,75 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. -# Copyright 2024 Arm Limited and/or its affiliates. # All rights reserved. +# Copyright 2024-2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
-import unittest from typing import Tuple -import pytest - import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.backend_details import CompileSpec -from parameterized import parameterized +from executorch.backends.arm.test import common + +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +aten_op = "torch.ops.aten.log.default" +exir_op = "executorch_exir_dialects_edge__ops_aten_log_default" + +input_t1 = Tuple[torch.Tensor] -test_data_suite = [ +test_data_suite = { # (test_name, test_data) - ("ones_rank4", torch.ones(1, 10, 10, 10)), - ("ones_rank3", torch.ones(10, 10, 10)), - ("rand", torch.rand(10, 10) + 0.001), - ("randn_pos", torch.randn(10) + 10), - ("randn_spread", torch.max(torch.Tensor([0.0]), torch.randn(10) * 100)), - ("ramp", torch.arange(0.01, 20, 0.2)), -] - - -class TestLog(unittest.TestCase): - """Tests lowering of aten.log""" - - class Log(torch.nn.Module): - def forward(self, x: torch.Tensor) -> torch.Tensor: - return torch.log(x) - - def _test_log_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check(["torch.ops.aten.log.default"]) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_log_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_log_tosa_BI_pipeline(self, module: torch.nn.Module, test_data: Tuple): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check(["torch.ops.aten.log.default"]) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_log_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_log_ethosu_BI_pipeline( - self, - compile_spec: CompileSpec, - module: torch.nn.Module, - test_data: Tuple[torch.tensor], - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.log.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_log_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - @parameterized.expand(test_data_suite) - def test_log_tosa_MI( - self, - test_name: str, - test_data: torch.Tensor, - ): - self._test_log_tosa_MI_pipeline(self.Log(), (test_data,)) - - @parameterized.expand(test_data_suite) - def test_log_tosa_BI(self, test_name: str, test_data: torch.Tensor): - self._test_log_tosa_BI_pipeline(self.Log(), (test_data,)) - - @parameterized.expand(test_data_suite) - @pytest.mark.corstone_fvp - def test_log_tosa_u55_BI(self, test_name: str, test_data: torch.Tensor): - self._test_log_ethosu_BI_pipeline( - 
common.get_u55_compile_spec(), self.Log(), (test_data,) - ) - - @parameterized.expand(test_data_suite) - @pytest.mark.corstone_fvp - def test_log_tosa_u85_BI(self, test_name: str, test_data: torch.Tensor): - self._test_log_ethosu_BI_pipeline( - common.get_u85_compile_spec(), self.Log(), (test_data,) - ) + "ones_rank4": lambda: (torch.ones(1, 10, 10, 10)), + "ones_rank3": lambda: (torch.ones(10, 10, 10)), + "rand": lambda: (torch.rand(10, 10) + 0.001), + "randn_pos": lambda: (torch.randn(10) + 10), + "randn_spread": lambda: (torch.max(torch.Tensor([0.0]), torch.randn(10) * 100)), + "ramp": lambda: (torch.arange(0.01, 20, 0.2)), +} + + +class Log(torch.nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + return torch.log(x) + + +@common.parametrize("test_data", test_data_suite) +def test_log_tosa_MI(test_data: input_t1): + pipeline = TosaPipelineMI[input_t1](Log(), (test_data(),), aten_op, exir_op) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_log_tosa_BI(test_data: input_t1): + pipeline = TosaPipelineBI[input_t1](Log(), (test_data(),), aten_op, exir_op) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone300 +def test_log_u55_BI(test_data: input_t1): + EthosU55PipelineBI[input_t1]( + Log(), + (test_data(),), + aten_op, + exir_op, + run_on_fvp=True, + ).run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone320 +def test_log_u85_BI(test_data: input_t1): + EthosU85PipelineBI[input_t1]( + Log(), + (test_data(),), + aten_op, + exir_op, + run_on_fvp=True, + ).run() diff --git a/backends/arm/test/ops/test_logical.py b/backends/arm/test/ops/test_logical.py index a4b66339b0c..139653eea97 100644 --- a/backends/arm/test/ops/test_logical.py +++ b/backends/arm/test/ops/test_logical.py @@ -23,19 +23,19 @@ class LogicalBinary(torch.nn.Module): test_data: dict[input_t2] = { - "rank1": ( + "rank1": lambda: ( torch.tensor([True, True, False, False], dtype=torch.bool), torch.tensor([True, False, True, False], dtype=torch.bool), ), - "rand_rank2": ( + "rand_rank2": lambda: ( torch.randint(0, 2, (10, 10), dtype=torch.bool), torch.randint(0, 2, (10, 10), dtype=torch.bool), ), - "rand_rank3": ( + "rand_rank3": lambda: ( torch.randint(0, 2, (10, 10, 10), dtype=torch.bool), torch.randint(0, 2, (10, 10, 10), dtype=torch.bool), ), - "rand_rank4": ( + "rand_rank4": lambda: ( torch.randint(0, 2, (1, 10, 10, 10), dtype=torch.bool), torch.randint(0, 2, (1, 10, 10, 10), dtype=torch.bool), ), @@ -68,10 +68,10 @@ def forward(self, tensor1: torch.Tensor, tensor2: torch.Tensor): class Not(torch.nn.Module): test_data: dict[input_t1] = { - "rank1": (torch.tensor([True, True, False, False], dtype=torch.bool),), - "rand_rank2": (torch.randint(0, 2, (10, 10), dtype=torch.bool),), - "rand_rank3": (torch.randint(0, 2, (10, 10, 10), dtype=torch.bool),), - "rand_rank4": (torch.randint(0, 2, (1, 10, 10, 10), dtype=torch.bool),), + "rank1": lambda: (torch.tensor([True, True, False, False], dtype=torch.bool),), + "rand_rank2": lambda: (torch.randint(0, 2, (10, 10), dtype=torch.bool),), + "rand_rank3": lambda: (torch.randint(0, 2, (10, 10, 10), dtype=torch.bool),), + "rand_rank4": lambda: (torch.randint(0, 2, (1, 10, 10, 10), dtype=torch.bool),), } aten_op = "torch.ops.aten.logical_not.default" @@ -83,23 +83,31 @@ def forward(self, tensor: torch.Tensor): @common.parametrize("test_data", And().test_data) def test_logical_and_tosa_MI(test_data: input_t2): - pipeline = TosaPipelineMI[input_t2](And(), test_data, 
And().aten_op, And().exir_op) + pipeline = TosaPipelineMI[input_t2]( + And(), test_data(), And().aten_op, And().exir_op + ) pipeline.run() @common.parametrize("test_data", And().test_data) def test_logical_and_tosa_BI(test_data: input_t2): - pipeline = TosaPipelineBI[input_t2](And(), test_data, And().aten_op, And().exir_op) + pipeline = TosaPipelineBI[input_t2]( + And(), test_data(), And().aten_op, And().exir_op + ) pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") pipeline.run() @common.parametrize("test_data", And().test_data) -def test_logical_and_u55_BI(test_data: input_t2): +def test_logical_and_u55_BI_not_delegated(test_data: input_t2): # Tests that we don't delegate these ops since they are not supported on U55. pipeline = OpNotSupportedPipeline[input_t2]( - And(), test_data, "TOSA-0.80+BI+u55", {And().exir_op: 1} + And(), + test_data(), + {And().exir_op: 1}, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -109,7 +117,7 @@ def test_logical_and_u55_BI(test_data: input_t2): @common.XfailIfNoCorstone320 def test_logical_and_u85_BI(test_data: input_t2): pipeline = EthosU85PipelineBI[input_t2]( - And(), test_data, And().aten_op, And().exir_op, run_on_fvp=True + And(), test_data(), And().aten_op, And().exir_op, run_on_fvp=True ) pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") @@ -118,23 +126,31 @@ def test_logical_and_u85_BI(test_data: input_t2): @common.parametrize("test_data", Xor().test_data) def test_logical_xor_tosa_MI(test_data: input_t2): - pipeline = TosaPipelineMI[input_t2](Xor(), test_data, Xor().aten_op, Xor().exir_op) + pipeline = TosaPipelineMI[input_t2]( + Xor(), test_data(), Xor().aten_op, Xor().exir_op + ) pipeline.run() @common.parametrize("test_data", Xor().test_data) def test_logical_xor_tosa_BI(test_data: input_t2): - pipeline = TosaPipelineBI[input_t2](Xor(), test_data, Xor().aten_op, Xor().exir_op) + pipeline = TosaPipelineBI[input_t2]( + Xor(), test_data(), Xor().aten_op, Xor().exir_op + ) pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") pipeline.run() @common.parametrize("test_data", Xor().test_data) -def test_logical_xor_u55_BI(test_data: input_t2): +def test_logical_xor_u55_BI_not_delegated(test_data: input_t2): # Tests that we don't delegate these ops since they are not supported on U55. 
pipeline = OpNotSupportedPipeline[input_t2]( - Xor(), test_data, "TOSA-0.80+BI+u55", {Xor().exir_op: 1} + Xor(), + test_data(), + {Xor().exir_op: 1}, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -144,7 +160,7 @@ def test_logical_xor_u55_BI(test_data: input_t2): @common.XfailIfNoCorstone320 def test_logical_xor_u85_BI(test_data: input_t2): pipeline = EthosU85PipelineBI[input_t2]( - Xor(), test_data, Xor().aten_op, Xor().exir_op, run_on_fvp=True + Xor(), test_data(), Xor().aten_op, Xor().exir_op, run_on_fvp=True ) pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") @@ -153,33 +169,37 @@ def test_logical_xor_u85_BI(test_data: input_t2): @common.parametrize("test_data", Or().test_data) def test_logical_or_tosa_MI(test_data: input_t2): - pipeline = TosaPipelineMI[input_t2](Or(), test_data, Or().aten_op, Or().exir_op) + pipeline = TosaPipelineMI[input_t2](Or(), test_data(), Or().aten_op, Or().exir_op) pipeline.run() @common.parametrize("test_data", Or().test_data) def test_logical_or_tosa_BI(test_data: input_t2): - pipeline = TosaPipelineBI[input_t2](Or(), test_data, Or().aten_op, Or().exir_op) + pipeline = TosaPipelineBI[input_t2](Or(), test_data(), Or().aten_op, Or().exir_op) pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") pipeline.run() @common.parametrize("test_data", Or().test_data) -def test_logical_or_u55_BI(test_data: input_t2): +def test_logical_or_u55_BI_not_delegated(test_data: input_t2): # Tests that we don't delegate these ops since they are not supported on U55. pipeline = OpNotSupportedPipeline[input_t2]( - Or(), test_data, "TOSA-0.80+BI+u55", {Or().exir_op: 1} + Or(), + test_data(), + {Or().exir_op: 1}, + quantize=True, + u55_subset=True, ) pipeline.run() @common.parametrize("test_data", Or().test_data) @pytest.mark.xfail(reason="MLETORCH-706: Support ScalarType::Bool in EthosUBackend.") -@common.XfailIfNoCorstone320 # TODO: Refactor to use XfailIfNoCorstone320 once MLETORCH-706 is done +@common.XfailIfNoCorstone320 def test_logical_or_u85_BI(test_data: input_t2): pipeline = EthosU85PipelineBI[input_t2]( - Or(), test_data, Or().aten_op, Or().exir_op, run_on_fvp=True + Or(), test_data(), Or().aten_op, Or().exir_op, run_on_fvp=True ) pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") @@ -188,23 +208,31 @@ def test_logical_or_u85_BI(test_data: input_t2): @common.parametrize("test_data", Not().test_data) def test_logical_not_tosa_MI(test_data: input_t2): - pipeline = TosaPipelineMI[input_t2](Not(), test_data, Not().aten_op, Not().exir_op) + pipeline = TosaPipelineMI[input_t2]( + Not(), test_data(), Not().aten_op, Not().exir_op + ) pipeline.run() @common.parametrize("test_data", Not().test_data) def test_logical_not_tosa_BI(test_data: input_t2): - pipeline = TosaPipelineBI[input_t2](Not(), test_data, Not().aten_op, Not().exir_op) + pipeline = TosaPipelineBI[input_t2]( + Not(), test_data(), Not().aten_op, Not().exir_op + ) pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") pipeline.run() @common.parametrize("test_data", Not().test_data) -def test_logical_not_u55_BI(test_data: input_t2): +def test_logical_not_u55_BI_not_delegated(test_data: input_t2): # Tests that we don't delegate these ops since they are not supported on U55. 
pipeline = OpNotSupportedPipeline[input_t2]( - Not(), test_data, "TOSA-0.80+BI+u55", {Not().exir_op: 1} + Not(), + test_data(), + {Not().exir_op: 1}, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -214,7 +242,7 @@ def test_logical_not_u55_BI(test_data: input_t2): @common.XfailIfNoCorstone320 def test_logical_not_u85_BI(test_data: input_t2): pipeline = EthosU85PipelineBI[input_t2]( - Not(), test_data, Not().aten_op, Not().exir_op, run_on_fvp=True + Not(), test_data(), Not().aten_op, Not().exir_op, run_on_fvp=True ) pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") diff --git a/backends/arm/test/ops/test_logsoftmax.py b/backends/arm/test/ops/test_logsoftmax.py index 7068ee77e01..50132ba8211 100644 --- a/backends/arm/test/ops/test_logsoftmax.py +++ b/backends/arm/test/ops/test_logsoftmax.py @@ -5,6 +5,8 @@ from typing import Tuple +import pytest + import torch from executorch.backends.arm.test import common from executorch.backends.arm.test.tester.test_pipeline import ( @@ -29,20 +31,20 @@ def forward(self, x): return self.log_softmax(x) test_data = { - "ones": ((torch.ones(10, 10),), 1), - "ones_neg_dim": ((torch.ones(1, 3, 4),), -1), - "randn_neg_dim": ((torch.randn(1, 5, 8, 7),), -3), - "zeros": ((torch.zeros(1, 8, 5, 2),), 0), - "zeros_neg_dim": ((torch.zeros(1, 7, 8, 9),), -4), - "rand": ((torch.rand(1, 2, 5, 8),), 2), - "rand_neg_dim": ((torch.rand(1, 10, 8, 10),), -2), - "randn_mult_batches": ((torch.randn(2, 10, 10, 10),), 3), + "ones": lambda: ((torch.ones(10, 10),), 1), + "ones_neg_dim": lambda: ((torch.ones(1, 3, 4),), -1), + "randn_neg_dim": lambda: ((torch.randn(1, 5, 8, 7),), -3), + "zeros": lambda: ((torch.zeros(1, 8, 5, 2),), 0), + "zeros_neg_dim": lambda: ((torch.zeros(1, 7, 8, 9),), -4), + "rand": lambda: ((torch.rand(1, 2, 5, 8),), 2), + "rand_neg_dim": lambda: ((torch.rand(1, 10, 8, 10),), -2), + "randn_mult_batches": lambda: ((torch.randn(2, 10, 10, 10),), 3), } @common.parametrize("test_data", LogSoftmax.test_data) def test_log_softmax_tosa_MI(test_data): - data, dim = test_data + data, dim = test_data() pipeline = TosaPipelineMI[input_t1](LogSoftmax(dim), data, []) pipeline.add_stage_after( "to_edge_transform_and_lower", pipeline.tester.check_not, [exir_op] @@ -51,9 +53,10 @@ def test_log_softmax_tosa_MI(test_data): pipeline.run() +@pytest.mark.flaky(reruns=5) @common.parametrize("test_data", LogSoftmax.test_data) def test_log_softmax_tosa_BI(test_data): - data, dim = test_data + data, dim = test_data() pipeline = TosaPipelineBI[input_t1](LogSoftmax(dim), data, []) pipeline.add_stage_after("quantize", pipeline.tester.check_not, [aten_op]) pipeline.change_args("run_method_and_compare_outputs", qtol=1) @@ -69,8 +72,13 @@ def test_log_softmax_tosa_BI(test_data): ) @common.XfailIfNoCorstone300() def test_log_softmax_u55_BI(test_data): - data, dim = test_data - pipeline = EthosU55PipelineBI[input_t1](LogSoftmax(dim), data, [], run_on_fvp=True) + data, dim = test_data() + pipeline = EthosU55PipelineBI[input_t1]( + LogSoftmax(dim), + data, + [], + run_on_fvp=True, + ) pipeline.add_stage_after("quantize", pipeline.tester.check_not, [aten_op]) pipeline.change_args("run_method_and_compare_outputs", qtol=1) pipeline.run() @@ -85,8 +93,13 @@ def test_log_softmax_u55_BI(test_data): ) @common.XfailIfNoCorstone320 def test_log_softmax_u85_BI(test_data): - data, dim = test_data - pipeline = EthosU85PipelineBI[input_t1](LogSoftmax(dim), data, [], run_on_fvp=True) + data, dim = test_data() + pipeline = EthosU85PipelineBI[input_t1]( + LogSoftmax(dim), + data, 
+ [], + run_on_fvp=True, + ) pipeline.add_stage_after("quantize", pipeline.tester.check_not, [aten_op]) pipeline.change_args("run_method_and_compare_outputs", qtol=1) pipeline.run() diff --git a/backends/arm/test/ops/test_lshift.py b/backends/arm/test/ops/test_lshift.py index f6ddabf6612..e74e80deeed 100644 --- a/backends/arm/test/ops/test_lshift.py +++ b/backends/arm/test/ops/test_lshift.py @@ -4,6 +4,7 @@ # LICENSE file in the root directory of this source tree. import torch +from executorch.backends.arm.test import common from executorch.backends.arm.test.common import ( XfailIfNoCorstone300, XfailIfNoCorstone320, @@ -14,7 +15,6 @@ TosaPipelineBI, TosaPipelineMI, ) -from parameterized import parameterized scalar_input_t = tuple[torch.Tensor, int] @@ -23,11 +23,20 @@ class LshiftScalar(torch.nn.Module): torch_op_MI = "torch.ops.aten.__lshift__.Scalar" torch_op_BI = "torch.ops.aten.bitwise_left_shift.Tensor" exir_op = "executorch_exir_dialects_edge__ops_aten_bitwise_left_shift_Tensor" - test_data = [ - ((torch.randint(-8, 8, (1, 12, 3, 4), dtype=torch.int8), 1),), - ((torch.randint(-100, 100, (1, 5, 3, 4), dtype=torch.int16), 5),), - ((torch.randint(-100, 100, (1, 5, 3, 4), dtype=torch.int32), 2),), - ] + test_data = { + "randint_neg_8_int8": ( + torch.randint(-8, 8, (1, 12, 3, 4), dtype=torch.int8), + 1, + ), + "randint_neg_100_int16": ( + torch.randint(-100, 100, (1, 5, 3, 4), dtype=torch.int16), + 5, + ), + "randint_neg_100_int32": ( + torch.randint(-100, 100, (1, 5, 3, 4), dtype=torch.int32), + 2, + ), + } def forward(self, x: torch.Tensor, shift: int): return x << shift @@ -39,33 +48,27 @@ def forward(self, x: torch.Tensor, shift: int): class LshiftTensor(torch.nn.Module): torch_op = "torch.ops.aten.bitwise_left_shift.Tensor" exir_op = "executorch_exir_dialects_edge__ops_aten_bitwise_left_shift_Tensor" - test_data = [ - ( - ( - torch.randint(-8, 8, (3, 3), dtype=torch.int8), - torch.randint(0, 4, (3, 3), dtype=torch.int8), - ), + test_data = { + "randint_neg_8_tensor_int8": ( + torch.randint(-8, 8, (3, 3), dtype=torch.int8), + torch.randint(0, 4, (3, 3), dtype=torch.int8), ), - ( - ( - torch.randint(-1024, 1024, (3, 3, 3), dtype=torch.int16), - torch.randint(0, 5, (3, 3, 3), dtype=torch.int16), - ), + "randint_neg_1024_tensor_int16": ( + torch.randint(-1024, 1024, (3, 3, 3), dtype=torch.int16), + torch.randint(0, 5, (3, 3, 3), dtype=torch.int16), ), - ( - ( - torch.randint(0, 127, (1, 2, 3, 3), dtype=torch.int32), - torch.randint(0, 5, (1, 2, 3, 3), dtype=torch.int32), - ), + "randint_0_tensor_int16": ( + torch.randint(0, 127, (1, 2, 3, 3), dtype=torch.int32), + torch.randint(0, 5, (1, 2, 3, 3), dtype=torch.int32), ), - ] + } def forward(self, x: torch.Tensor, shift: torch.Tensor): return x.bitwise_left_shift(shift) -@parameterized.expand(LshiftScalar.test_data) -def test_lshift_scalar_tosa_MI(test_data): +@common.parametrize("test_data", LshiftScalar.test_data) +def test_lshift_scalar_tosa_MI_scalar(test_data): TosaPipelineMI[scalar_input_t]( LshiftScalar(), test_data, @@ -74,18 +77,21 @@ def test_lshift_scalar_tosa_MI(test_data): ).run() -@parameterized.expand(LshiftScalar.test_data) -def test_lshift_scalar_tosa_BI(test_data): +@common.parametrize("test_data", LshiftScalar.test_data) +def test_bitwise_left_shift_tensor_tosa_BI_scalar(test_data): pipeline = TosaPipelineBI[scalar_input_t]( - LshiftScalar(), test_data, LshiftScalar.torch_op_BI, LshiftScalar.exir_op + LshiftScalar(), + test_data, + LshiftScalar.torch_op_BI, + LshiftScalar.exir_op, ) 
pipeline.pop_stage("check.quant_nodes") pipeline.run() -@parameterized.expand(LshiftScalar.test_data) +@common.parametrize("test_data", LshiftScalar.test_data) @XfailIfNoCorstone300 -def test_lshift_scalar_tosa_u55(test_data): +def test_bitwise_left_shift_tensor_u55_BI_scalar(test_data): pipeline = EthosU55PipelineBI[scalar_input_t]( LshiftScalar(), test_data, @@ -97,9 +103,9 @@ def test_lshift_scalar_tosa_u55(test_data): pipeline.run() -@parameterized.expand(LshiftScalar.test_data) +@common.parametrize("test_data", LshiftScalar.test_data) @XfailIfNoCorstone320 -def test_lshift_scalar_tosa_u85(test_data): +def test_bitwise_left_shift_tensor_u85_BI_scalar(test_data): pipeline = EthosU85PipelineBI[scalar_input_t]( LshiftScalar(), test_data, @@ -111,8 +117,8 @@ def test_lshift_scalar_tosa_u85(test_data): pipeline.run() -@parameterized.expand(LshiftTensor.test_data) -def test_lshift_tensor_tosa_MI(test_data): +@common.parametrize("test_data", LshiftTensor.test_data) +def test_lshift_scalar_tosa_MI(test_data): TosaPipelineMI[scalar_input_t]( LshiftTensor(), test_data, @@ -121,18 +127,21 @@ def test_lshift_tensor_tosa_MI(test_data): ).run() -@parameterized.expand(LshiftTensor.test_data) -def test_lshift_tensor_tosa_BI(test_data): +@common.parametrize("test_data", LshiftTensor.test_data) +def test_bitwise_left_shift_tensor_tosa_BI(test_data): pipeline = TosaPipelineBI[scalar_input_t]( - LshiftTensor(), test_data, LshiftTensor.torch_op, LshiftTensor.exir_op + LshiftTensor(), + test_data, + LshiftTensor.torch_op, + LshiftTensor.exir_op, ) pipeline.pop_stage("check.quant_nodes") pipeline.run() -@parameterized.expand(LshiftTensor.test_data) +@common.parametrize("test_data", LshiftTensor.test_data) @XfailIfNoCorstone300 -def test_lshift_tensor_tosa_u55(test_data): +def test_bitwise_left_shift_tensor_u55_BI(test_data): pipeline = EthosU55PipelineBI[scalar_input_t]( LshiftTensor(), test_data, @@ -144,9 +153,9 @@ def test_lshift_tensor_tosa_u55(test_data): pipeline.run() -@parameterized.expand(LshiftTensor.test_data) +@common.parametrize("test_data", LshiftTensor.test_data) @XfailIfNoCorstone320 -def test_lshift_tensor_tosa_u85(test_data): +def test_bitwise_left_shift_tensor_u85_BI(test_data): pipeline = EthosU85PipelineBI[scalar_input_t]( LshiftTensor(), test_data, diff --git a/backends/arm/test/ops/test_lt.py b/backends/arm/test/ops/test_lt.py index f5664b7895d..92298ca70fa 100644 --- a/backends/arm/test/ops/test_lt.py +++ b/backends/arm/test/ops/test_lt.py @@ -63,24 +63,27 @@ def get_inputs(self): op_lt_scalar_rank4_randn = LessThan(torch.randn(3, 2, 2, 2), 0.3) test_data_tensor = { - "lt_tensor_rank1_ones": op_lt_tensor_rank1_ones, - "lt_tensor_rank2_rand": op_lt_tensor_rank2_rand, - "lt_tensor_rank3_randn": op_lt_tensor_rank3_randn, - "lt_tensor_rank4_randn": op_lt_tensor_rank4_randn, + "lt_tensor_rank1_ones": lambda: op_lt_tensor_rank1_ones, + "lt_tensor_rank2_rand": lambda: op_lt_tensor_rank2_rand, + "lt_tensor_rank3_randn": lambda: op_lt_tensor_rank3_randn, + "lt_tensor_rank4_randn": lambda: op_lt_tensor_rank4_randn, } test_data_scalar = { - "lt_scalar_rank1_ones": op_lt_scalar_rank1_ones, - "lt_scalar_rank2_rand": op_lt_scalar_rank2_rand, - "lt_scalar_rank3_randn": op_lt_scalar_rank3_randn, - "lt_scalar_rank4_randn": op_lt_scalar_rank4_randn, + "lt_scalar_rank1_ones": lambda: op_lt_scalar_rank1_ones, + "lt_scalar_rank2_rand": lambda: op_lt_scalar_rank2_rand, + "lt_scalar_rank3_randn": lambda: op_lt_scalar_rank3_randn, + "lt_scalar_rank4_randn": lambda: op_lt_scalar_rank4_randn, } 
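Throughout this refactor the test suites become dicts from case name to a zero-argument lambda that the test body later calls (test_data() / test_module()); the dict key becomes the test id and tensor construction is deferred to run time. A minimal sketch of the same idea in plain pytest (illustrative names; common.parametrize is assumed to expand such a dict in a similar way):

import pytest
import torch

_cases = {
    "ones": lambda: (torch.ones(4, 4),),
    "rand": lambda: (torch.rand(4, 4),),
}


@pytest.mark.parametrize("make_inputs", list(_cases.values()), ids=list(_cases))
def test_abs_like(make_inputs):
    (x,) = make_inputs()  # inputs are materialized lazily, at test run time
    assert torch.abs(x).shape == x.shape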
@common.parametrize("test_module", test_data_tensor) def test_lt_tensor_tosa_MI(test_module): pipeline = TosaPipelineMI[input_t]( - test_module, test_module.get_inputs(), LessThan.aten_op_tensor, LessThan.exir_op + test_module(), + test_module().get_inputs(), + LessThan.aten_op_tensor, + LessThan.exir_op, ) pipeline.run() @@ -88,7 +91,10 @@ def test_lt_tensor_tosa_MI(test_module): @common.parametrize("test_module", test_data_scalar) def test_lt_scalar_tosa_MI(test_module): pipeline = TosaPipelineMI[input_t]( - test_module, test_module.get_inputs(), LessThan.aten_op_scalar, LessThan.exir_op + test_module(), + test_module().get_inputs(), + LessThan.aten_op_scalar, + LessThan.exir_op, ) pipeline.run() @@ -96,7 +102,10 @@ def test_lt_scalar_tosa_MI(test_module): @common.parametrize("test_module", test_data_tensor) def test_lt_tensor_tosa_BI(test_module): pipeline = TosaPipelineBI[input_t]( - test_module, test_module.get_inputs(), LessThan.aten_op_tensor, LessThan.exir_op + test_module(), + test_module().get_inputs(), + LessThan.aten_op_tensor, + LessThan.exir_op, ) pipeline.run() @@ -104,34 +113,39 @@ def test_lt_tensor_tosa_BI(test_module): @common.parametrize("test_module", test_data_scalar) def test_lt_scalar_tosa_BI(test_module): pipeline = TosaPipelineBI[input_t]( - test_module, test_module.get_inputs(), LessThan.aten_op_tensor, LessThan.exir_op + test_module(), + test_module().get_inputs(), + LessThan.aten_op_tensor, + LessThan.exir_op, ) pipeline.run() @common.parametrize("test_module", test_data_tensor) @common.XfailIfNoCorstone300 -def test_lt_tensor_u55_BI(test_module): +def test_lt_tensor_u55_BI_not_delegated(test_module): # LessThan is not supported on U55. pipeline = OpNotSupportedPipeline[input_t]( - test_module, - test_module.get_inputs(), - "TOSA-0.80+BI+u55", + test_module(), + test_module().get_inputs(), {LessThan.exir_op: 1}, + quantize=True, + u55_subset=True, ) pipeline.run() @common.parametrize("test_module", test_data_scalar) @common.XfailIfNoCorstone300 -def test_lt_scalar_u55_BI(test_module): +def test_lt_scalar_u55_BI_not_delegated(test_module): # LessThan is not supported on U55. pipeline = OpNotSupportedPipeline[input_t]( - test_module, - test_module.get_inputs(), - "TOSA-0.80+BI+u55", + test_module(), + test_module().get_inputs(), {LessThan.exir_op: 1}, n_expected_delegates=1, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -146,8 +160,8 @@ def test_lt_scalar_u55_BI(test_module): @common.XfailIfNoCorstone320 def test_lt_tensor_u85_BI(test_module): pipeline = EthosU85PipelineBI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), LessThan.aten_op_tensor, LessThan.exir_op, run_on_fvp=True, @@ -165,8 +179,8 @@ def test_lt_tensor_u85_BI(test_module): @common.XfailIfNoCorstone320 def test_lt_scalar_u85_BI(test_module): pipeline = EthosU85PipelineBI[input_t]( - test_module, - test_module.get_inputs(), + test_module(), + test_module().get_inputs(), LessThan.aten_op_tensor, LessThan.exir_op, run_on_fvp=True, diff --git a/backends/arm/test/ops/test_max_pool.py b/backends/arm/test/ops/test_max_pool.py index 4db8c62bd88..a1fd3ea30ec 100644 --- a/backends/arm/test/ops/test_max_pool.py +++ b/backends/arm/test/ops/test_max_pool.py @@ -5,280 +5,183 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
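The *_not_delegated tests above (here and in test_logical.py) assert that an op unsupported on U55 is kept as an edge op rather than folded into an executorch_call_delegate call; OpNotSupportedPipeline now takes the expected op counts plus quantize/u55_subset flags instead of a TOSA profile string. A rough illustration of that kind of graph check with plain torch.export (not the Arm pipeline, just the underlying idea):

import torch
from torch.export import export


class And(torch.nn.Module):
    def forward(self, x, y):
        return torch.logical_and(x, y)


ep = export(
    And(),
    (torch.zeros(4, dtype=torch.bool), torch.ones(4, dtype=torch.bool)),
)
# Count the logical_and nodes that survive export; a "not delegated" check
# asserts a similar count on the partitioned graph instead of a delegate call.
count = sum(
    1
    for node in ep.graph_module.graph.nodes
    if node.op == "call_function" and "logical_and" in str(node.target)
)
assert count == 1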
-import unittest from typing import Tuple -import pytest - import torch -from executorch.backends.arm.quantizer import ( - EthosUQuantizer, - get_symmetric_quantization_config, - TOSAQuantizer, -) -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.backends.arm.tosa_specification import TosaSpecification - -from executorch.backends.xnnpack.test.tester.tester import Quantize -from executorch.exir.backend.backend_details import CompileSpec -from parameterized import parameterized - - -test_data_suite = [ - # (test_name, test_data, [kernel_size, stride, padding]) - ("zeros", torch.zeros(1, 1, 4, 8), [2, 2, 1]), - ("ones", torch.ones(1, 16, 50, 32), [4, 2, 0]), - ("rand", torch.rand(1, 16, 52, 16), [4, 3, 0]), - ("non_divisible", torch.rand(1, 16, 112, 112), [3, 2, 1]), -] - -test_data_suite_mult_batches = [ - ("randn", torch.randn(5, 16, 50, 32), [4, 2, 0]), -] - - -class TestMaxPool2d(unittest.TestCase): - """Tests MaxPool2d.""" - - class MaxPool2d(torch.nn.Module): - def __init__( - self, - kernel_size: int | Tuple[int, int], - stride: int | Tuple[int, int], - padding: int | Tuple[int, int], - ): - super().__init__() - self.max_pool_2d = torch.nn.MaxPool2d( - kernel_size=kernel_size, stride=stride, padding=padding - ) - - def forward(self, x): - return self.max_pool_2d(x) - - def _test_maxpool2d_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec( - "TOSA-0.80+MI", - ), - ) - .export() - .check(["torch.ops.aten.max_pool2d.default"]) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_max_pool2d_default"]) - .check_not( - [ - "executorch_exir_dialects_edge__ops_aten_max_pool2d_with_indices_default" - ] - ) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - - def _test_maxpool2d_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - tosa_spec = TosaSpecification.create_from_string("TOSA-0.80+BI") - compile_spec = common.get_tosa_compile_spec(tosa_spec) - quantizer = TOSAQuantizer(tosa_spec).set_io(get_symmetric_quantization_config()) - ( - ArmTester(module, example_inputs=test_data, compile_spec=compile_spec) - .quantize(Quantize(quantizer, get_symmetric_quantization_config())) - .export() - .check_count({"torch.ops.aten.max_pool2d.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_max_pool2d_default"]) - .check_not( - [ - "executorch_exir_dialects_edge__ops_aten_max_pool2d_with_indices_default" - ] - ) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, qtol=1) - ) - def _test_maxpool2d_tosa_ethos_BI_pipeline( - self, - module: torch.nn.Module, - compile_spec: CompileSpec, - test_data: Tuple[torch.tensor], - ): - quantizer = EthosUQuantizer(compile_spec).set_io( - get_symmetric_quantization_config() - ) - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize(Quantize(quantizer, get_symmetric_quantization_config())) - .export() - .check_count({"torch.ops.aten.max_pool2d.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - 
.check_not(["executorch_exir_dialects_edge__ops_aten_max_pool2d_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - - return tester +from executorch.backends.arm.test import common - @parameterized.expand(test_data_suite) - def test_maxpool2d_tosa_MI( - self, - test_name: str, - test_data: torch.Tensor, - model_params: int | Tuple[int, int], - ): - self._test_maxpool2d_tosa_MI_pipeline( - self.MaxPool2d(*model_params), (test_data,) - ) - - @parameterized.expand(test_data_suite) - def test_maxpool2d_tosa_BI( - self, - test_name: str, - test_data: torch.Tensor, - model_params: int | Tuple[int, int], - ): - self._test_maxpool2d_tosa_BI_pipeline( - self.MaxPool2d(*model_params), (test_data,) - ) +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) - @parameterized.expand(test_data_suite) - @pytest.mark.corstone_fvp - def test_maxpool2d_tosa_u55_BI( - self, - test_name: str, - test_data: torch.Tensor, - model_params: int | Tuple[int, int], - ): - tester = self._test_maxpool2d_tosa_ethos_BI_pipeline( - self.MaxPool2d(*model_params), - common.get_u55_compile_spec(), - (test_data,), - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=(test_data,)) - @parameterized.expand(test_data_suite) - @pytest.mark.corstone_fvp - def test_maxpool2d_tosa_u85_BI( - self, - test_name: str, - test_data: torch.Tensor, - model_params: int | Tuple[int, int], - ): - tester = self._test_maxpool2d_tosa_ethos_BI_pipeline( - self.MaxPool2d(*model_params), - common.get_u85_compile_spec(), - (test_data,), - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=(test_data,)) +test_data_suite = { + # (test_name, test_data, [kernel_size, stride, padding]) + "zeros": lambda: (torch.zeros(1, 1, 4, 8), [2, 2, 1]), + "ones": lambda: (torch.ones(1, 16, 50, 32), [4, 2, 0]), + "rand": lambda: (torch.rand(1, 16, 52, 16), [4, 3, 0]), + "non_divisible": lambda: (torch.rand(1, 16, 112, 112), [3, 2, 1]), +} - @parameterized.expand(test_data_suite_mult_batches) - def test_maxpool2d_tosa_MI_mult_batches( - self, - test_name: str, - test_data: torch.Tensor, - model_params: int | Tuple[int, int], - ): - self._test_maxpool2d_tosa_MI_pipeline( - self.MaxPool2d(*model_params), (test_data,) - ) +test_data_suite_mult_batches = { + "randn": lambda: (torch.randn(5, 16, 50, 32), [4, 2, 0]), +} - @parameterized.expand(test_data_suite_mult_batches) - def test_maxpool2d_tosa_BI_mult_batches( - self, - test_name: str, - test_data: torch.Tensor, - model_params: int | Tuple[int, int], - ): - self._test_maxpool2d_tosa_BI_pipeline( - self.MaxPool2d(*model_params), (test_data,) - ) - @parameterized.expand(test_data_suite_mult_batches) - @pytest.mark.corstone_fvp - @conftest.expectedFailureOnFVP # TODO: MLETORCH-433 - def test_maxpool2d_tosa_u85_BI_mult_batches( - self, - test_name: str, - test_data: torch.Tensor, - model_params: int | Tuple[int, int], - ): - tester = self._test_maxpool2d_tosa_ethos_BI_pipeline( - self.MaxPool2d(*model_params), - common.get_u85_compile_spec(), - (test_data,), - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=(test_data,)) +aten_op = "torch.ops.aten.max_pool2d.default" +exir_op = "executorch_exir_dialects_edge__ops_aten_max_pool2d_default" - @parameterized.expand(test_data_suite_mult_batches) - 
@pytest.mark.corstone_fvp - @conftest.expectedFailureOnFVP # TODO: MLETORCH-433 - def test_maxpool2d_tosa_u55_BI_mult_batches( - self, - test_name: str, - test_data: torch.Tensor, - model_params: int | Tuple[int, int], - ): - tester = self._test_maxpool2d_tosa_ethos_BI_pipeline( - self.MaxPool2d(*model_params), - common.get_u55_compile_spec(), - (test_data,), - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=(test_data,)) +input_t1 = Tuple[torch.Tensor] - reject_data_suite = [ - (MaxPool2d(1, 4, 0), torch.rand(1, 10, 10, 10)), - (MaxPool2d((1, 257), 1, 0), torch.rand(1, 16, 5, 300)), - (MaxPool2d((800, 90), 1, 0), torch.rand(1, 16, 850, 100)), - ] - @parameterized.expand(reject_data_suite) - def test_reject_maxpool2d_u55_BI( +class MaxPool2d(torch.nn.Module): + def __init__( self, - module: torch.nn.Module, - test_data: torch.tensor, + kernel_size: int | Tuple[int, int], + stride: int | Tuple[int, int], + padding: int | Tuple[int, int], ): - compile_spec = common.get_u55_compile_spec() - quantizer = EthosUQuantizer(compile_spec).set_io( - get_symmetric_quantization_config() + super().__init__() + self.max_pool_2d = torch.nn.MaxPool2d( + kernel_size=kernel_size, stride=stride, padding=padding ) - ( - ArmTester( - module, - example_inputs=(test_data,), - compile_spec=compile_spec, - ) - .quantize(Quantize(quantizer, get_symmetric_quantization_config())) - .export() - .check_count({"torch.ops.aten.max_pool2d.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge_transform_and_lower() - .check( - [ - "executorch_exir_dialects_edge__ops_aten_max_pool2d_with_indices_default" - ] - ) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 0}) - ) + def forward(self, x): + return self.max_pool_2d(x) + + +@common.parametrize("test_data", test_data_suite) +def test_max_pool2d_tosa_MI(test_data: torch.Tensor): + test_data, model_params = test_data() + pipeline = TosaPipelineMI[input_t1]( + MaxPool2d(*model_params), (test_data,), aten_op, exir_op + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_max_pool2d_tosa_BI(test_data: torch.Tensor): + test_data, model_params = test_data() + pipeline = TosaPipelineBI[input_t1]( + MaxPool2d(*model_params), + (test_data,), + aten_op, + exir_op, + symmetric_io_quantization=True, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone300 +def test_max_pool2d_u55_BI(test_data: torch.Tensor): + test_data, model_params = test_data() + EthosU55PipelineBI[input_t1]( + MaxPool2d(*model_params), + (test_data,), + aten_op, + exir_ops=[], + symmetric_io_quantization=True, + run_on_fvp=True, + ).run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone320 +def test_max_pool2d_u85_BI(test_data: torch.Tensor): + test_data, model_params = test_data() + EthosU85PipelineBI[input_t1]( + MaxPool2d(*model_params), + (test_data,), + aten_op, + exir_ops=[], + symmetric_io_quantization=True, + run_on_fvp=True, + ).run() + + +@common.parametrize("test_data", test_data_suite_mult_batches) +def test_max_pool2d_tosa_MI_mult_batches(test_data: torch.Tensor): + test_data, model_params = test_data() + pipeline = TosaPipelineMI[input_t1]( + MaxPool2d(*model_params), + (test_data,), + aten_op, + exir_op, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite_mult_batches) +def test_max_pool2d_tosa_BI_mult_batches(test_data: torch.Tensor): + test_data, model_params = test_data() + 
pipeline = TosaPipelineBI[input_t1](
+        MaxPool2d(*model_params),
+        (test_data,),
+        aten_op,
+        exir_op,
+        symmetric_io_quantization=True,
+    )
+    pipeline.run()
+
+
+x_fail = {"randn": "MLETORCH-986: Numerical issues with multi batches."}
+
+
+@common.parametrize("test_data", test_data_suite_mult_batches, x_fail)
+@common.XfailIfNoCorstone300
+def test_max_pool2d_u55_BI_mult_batches(test_data: torch.Tensor):
+    test_data, model_params = test_data()
+    EthosU55PipelineBI[input_t1](
+        MaxPool2d(*model_params),
+        (test_data,),
+        aten_op,
+        exir_ops=[],
+        run_on_fvp=True,
+        symmetric_io_quantization=True,
+        use_to_edge_transform_and_lower=True,
+    ).run()
+
+
+@common.parametrize("test_data", test_data_suite_mult_batches, x_fail)
+@common.XfailIfNoCorstone320
+def test_max_pool2d_u85_BI_mult_batches(test_data: torch.Tensor):
+    test_data, model_params = test_data()
+    EthosU85PipelineBI[input_t1](
+        MaxPool2d(*model_params),
+        (test_data,),
+        aten_op,
+        exir_op,
+        run_on_fvp=True,
+        symmetric_io_quantization=True,
+        use_to_edge_transform_and_lower=True,
+    ).run()
+
+
+reject_data_suite = {
+    "reject_1": lambda: (MaxPool2d(1, 4, 0), torch.rand(1, 10, 10, 10)),
+    "reject_2": lambda: (MaxPool2d((1, 257), 1, 0), torch.rand(1, 16, 5, 300)),
+    "reject_3": lambda: (MaxPool2d((800, 90), 1, 0), torch.rand(1, 16, 850, 100)),
+}
+
+
+@common.parametrize("test_data", reject_data_suite)
+@common.XfailIfNoCorstone300
+def test_max_pool2d_u55_BI_failure_set(test_data: Tuple):
+    module, test_data = test_data()
+    pipeline = EthosU55PipelineBI[input_t1](
+        module,
+        (test_data,),
+        aten_op,
+        exir_op,
+        run_on_fvp=False,
+        symmetric_io_quantization=True,
+        use_to_edge_transform_and_lower=True,
+    )
+    pipeline.pop_stage("check_count.exir")
+    pipeline.run()
diff --git a/backends/arm/test/ops/test_maximum.py b/backends/arm/test/ops/test_maximum.py
index a255496d517..adcc7dc9cab 100644
--- a/backends/arm/test/ops/test_maximum.py
+++ b/backends/arm/test/ops/test_maximum.py
@@ -1,127 +1,75 @@
 # Copyright (c) Meta Platforms, Inc. and affiliates.
-# Copyright 2024-2025 Arm Limited and/or its affiliates.
 # All rights reserved.
+# Copyright 2024-2025 Arm Limited and/or its affiliates.
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
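The x_fail mapping above is passed as the third argument to common.parametrize, which is assumed to mark the named cases as expected failures (here pointing at MLETORCH-986). The standard pytest way to express the same per-case xfail, shown with illustrative names:

import pytest
import torch

cases = {
    "ones": lambda: (torch.ones(2, 16, 8, 8),),
    "randn": lambda: (torch.randn(2, 16, 8, 8),),
}
xfails = {"randn": "hypothetical known issue with multi-batch inputs"}

params = [
    pytest.param(
        make_inputs,
        id=name,
        marks=[pytest.mark.xfail(reason=xfails[name])] if name in xfails else [],
    )
    for name, make_inputs in cases.items()
]


@pytest.mark.parametrize("make_inputs", params)
def test_pool_like(make_inputs):
    (x,) = make_inputs()
    pooled = torch.nn.functional.max_pool2d(x, kernel_size=2)
    assert pooled.shape[-1] == x.shape[-1] // 2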
-import unittest from typing import Tuple import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - - -class TestMaximum(unittest.TestCase): - """Tests a single maximum op""" - - class Maximum(torch.nn.Module): - test_parameters = [ - ( - torch.FloatTensor([1, 2, 3, 5, 7]), - (torch.FloatTensor([2, 1, 2, 1, 10])), - ), - (torch.ones(1, 10, 4, 6), 2 * torch.ones(1, 10, 4, 6)), - (torch.randn(1, 1, 4, 4), torch.ones(1, 1, 4, 1)), - (torch.randn(1, 3, 4, 4), torch.randn(1, 3, 4, 4)), - (10000 * torch.randn(1, 1, 4, 4), torch.randn(1, 1, 4, 1)), - ] - - def __init__(self): - super().__init__() - - def forward(self, x, y): - return torch.maximum(x, y) - - def _test_maximum_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.maximum.default": 1}) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_maximum_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check_count({"torch.ops.aten.maximum.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, qtol=1) - ) - - def _test_maximum_ethos_BI_pipeline( - self, - module: torch.nn.Module, - compile_spec: CompileSpec, - test_data: Tuple[torch.Tensor], - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .to_edge() - .partition() - .to_executorch() - .serialize() - ) - - return tester - - @parameterized.expand(Maximum.test_parameters) - def test_maximum_tosa_MI(self, operand1: torch.Tensor, operand2: torch.Tensor): - test_data = (operand1, operand2) - self._test_maximum_tosa_MI_pipeline(self.Maximum(), test_data) - - @parameterized.expand(Maximum.test_parameters) - def test_maximum_tosa_BI(self, operand1: torch.Tensor, operand2: torch.Tensor): - test_data = (operand1, operand2) - self._test_maximum_tosa_BI_pipeline(self.Maximum(), test_data) - - @parameterized.expand(Maximum.test_parameters) - def test_maximum_u55_BI(self, operand1: torch.Tensor, operand2: torch.Tensor): - test_data = (operand1, operand2) - tester = self._test_maximum_ethos_BI_pipeline( - self.Maximum(), common.get_u55_compile_spec(), test_data - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - @parameterized.expand(Maximum.test_parameters) - def test_maximum_u85_BI(self, operand1: torch.Tensor, operand2: torch.Tensor): - test_data = (operand1, operand2) - tester = self._test_maximum_ethos_BI_pipeline( - self.Maximum(), common.get_u85_compile_spec(), test_data - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) +from executorch.backends.arm.test import 
common +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +test_t = tuple[torch.Tensor, torch.Tensor] +aten_op = "torch.ops.aten.maximum.default" + + +class Maximum(torch.nn.Module): + test_parameters = { + "float_tensor": lambda: ( + torch.FloatTensor([1, 2, 3, 5, 7]), + (torch.FloatTensor([2, 1, 2, 1, 10])), + ), + "ones": lambda: (torch.ones(1, 10, 4, 6), 2 * torch.ones(1, 10, 4, 6)), + "rand_diff": lambda: (torch.randn(1, 1, 4, 4), torch.ones(1, 1, 4, 1)), + "rand_same": lambda: (torch.randn(1, 3, 4, 4), torch.randn(1, 3, 4, 4)), + "rand_large": lambda: ( + 10000 * torch.randn(1, 1, 4, 4), + torch.randn(1, 1, 4, 1), + ), + } + + def __init__(self): + super().__init__() + + def forward(self, x, y): + return torch.maximum(x, y) + + +@common.parametrize("test_data", Maximum.test_parameters) +def test_maximum_tosa_MI(test_data: Tuple): + TosaPipelineMI[test_t](Maximum(), test_data(), aten_op).run() + + +@common.parametrize("test_data", Maximum.test_parameters) +def test_maximum_tosa_BI(test_data: Tuple): + TosaPipelineBI[test_t](Maximum(), test_data(), aten_op).run() + + +@common.parametrize("test_data", Maximum.test_parameters) +@common.XfailIfNoCorstone300 +def test_maximum_u55_BI(test_data: Tuple): + EthosU55PipelineBI[test_t]( + Maximum(), + test_data(), + aten_op, + run_on_fvp=True, + ).run() + + +@common.parametrize("test_data", Maximum.test_parameters) +@common.XfailIfNoCorstone320 +def test_maximum_u85_BI(test_data: Tuple): + EthosU85PipelineBI[test_t]( + Maximum(), + test_data(), + aten_op, + run_on_fvp=True, + ).run() diff --git a/backends/arm/test/ops/test_mean_dim.py b/backends/arm/test/ops/test_mean_dim.py index 2351b0f9e9c..43063058805 100644 --- a/backends/arm/test/ops/test_mean_dim.py +++ b/backends/arm/test/ops/test_mean_dim.py @@ -20,10 +20,10 @@ class AdaptiveAveragePool2d(torch.nn.Module): test_data_suite = { # (test_name, test_data) - "zeros": (torch.zeros(1, 1280, 7, 7),), - "ones": (torch.ones(1, 1280, 7, 7),), - "rand": (torch.rand(1, 1280, 7, 7),), - "randn": (torch.randn(1, 1280, 7, 7),), + "zeros": lambda: (torch.zeros(1, 1280, 7, 7),), + "ones": lambda: (torch.ones(1, 1280, 7, 7),), + "rand": lambda: (torch.rand(1, 1280, 7, 7),), + "randn": lambda: (torch.randn(1, 1280, 7, 7),), } aten_op = "torch.ops.aten.adaptive_avg_pool2d.default" exir_op = "executorch_exir_dialects_edge__ops_aten_mean_dim" @@ -40,7 +40,7 @@ def forward(self, x): def test_adaptive_avg_pool2d_tosa_MI(test_data): TosaPipelineMI[input_t]( AdaptiveAveragePool2d(), - test_data, + test_data(), AdaptiveAveragePool2d.aten_op, AdaptiveAveragePool2d.exir_op, ).run() @@ -50,38 +50,18 @@ def test_adaptive_avg_pool2d_tosa_MI(test_data): def test_adaptive_avg_pool2d_tosa_BI(test_data): TosaPipelineBI[input_t]( AdaptiveAveragePool2d(), - test_data, + test_data(), AdaptiveAveragePool2d.aten_op, AdaptiveAveragePool2d.exir_op, ).run() @common.parametrize("test_data", AdaptiveAveragePool2d.test_data_suite) -def test_adaptive_avg_pool2d_u55(test_data): - EthosU55PipelineBI[input_t]( - AdaptiveAveragePool2d(), - test_data, - AdaptiveAveragePool2d.aten_op, - AdaptiveAveragePool2d.exir_op, - ).run() - - -@common.parametrize("test_data", AdaptiveAveragePool2d.test_data_suite) -def test_adaptive_avg_pool2d_u85(test_data): - EthosU85PipelineBI[input_t]( - AdaptiveAveragePool2d(), - test_data, - AdaptiveAveragePool2d.aten_op, - AdaptiveAveragePool2d.exir_op, - ).run() - - -@common.parametrize("test_data", 
AdaptiveAveragePool2d.test_data_suite) -@common.SkipIfNoCorstone300 -def test_adaptive_avg_pool2d_u55_on_fvp(test_data): +@common.XfailIfNoCorstone300 +def test_adaptive_avg_pool2d_u55_BI(test_data): EthosU55PipelineBI[input_t]( AdaptiveAveragePool2d(), - test_data, + test_data(), AdaptiveAveragePool2d.aten_op, AdaptiveAveragePool2d.exir_op, run_on_fvp=True, @@ -89,11 +69,11 @@ def test_adaptive_avg_pool2d_u55_on_fvp(test_data): @common.parametrize("test_data", AdaptiveAveragePool2d.test_data_suite) -@common.SkipIfNoCorstone320 -def test_adaptive_avg_pool2d_u85_on_fvp(test_data): +@common.XfailIfNoCorstone320 +def test_adaptive_avg_pool2d_u85_BI(test_data): EthosU85PipelineBI[input_t]( AdaptiveAveragePool2d(), - test_data, + test_data(), AdaptiveAveragePool2d.aten_op, AdaptiveAveragePool2d.exir_op, run_on_fvp=True, @@ -102,14 +82,14 @@ def test_adaptive_avg_pool2d_u85_on_fvp(test_data): class MeanDim(torch.nn.Module): test_data_suite: dict[str, tuple] = { - "zeros": (torch.zeros(1, 1280, 7, 7), -1, True), - "ones": (torch.ones(1, 1280, 7, 7), (-1, 2), False), - "rand": ( + "zeros": lambda: (torch.zeros(1, 1280, 7, 7), -1, True), + "ones": lambda: (torch.ones(1, 1280, 7, 7), (-1, 2), False), + "rand": lambda: ( torch.rand(1, 1280, 7, 7), (-1), True, ), - "randn": ( + "randn": lambda: ( torch.randn(1, 1280, 7, 7), (-1, -2, -3), False, @@ -128,20 +108,22 @@ def forward(self, x: torch.Tensor): @common.parametrize("test_data", MeanDim.test_data_suite) -def test_mean_tosa_MI(test_data): +def test_mean_dim_tosa_MI(test_data): + test_data, dim, keep_dim = test_data() TosaPipelineMI[input_t]( - MeanDim(test_data[1], test_data[2]), - (test_data[0],), + MeanDim(dim, keep_dim), + (test_data,), MeanDim.torch_op, MeanDim.exir_op, ).run() @common.parametrize("test_data", MeanDim.test_data_suite) -def test_mean_tosa_BI(test_data): +def test_mean_dim_tosa_BI(test_data): + test_data, dim, keep_dim = test_data() pipeline = TosaPipelineBI[input_t]( - MeanDim(test_data[1], test_data[2]), - (test_data[0],), + MeanDim(dim, keep_dim), + (test_data,), "torch.ops.aten.sum.dim_IntList", # Just check for sum op included in the mean decomposition ) pipeline.change_args("run_method_and_compare_outputs", qtol=1) @@ -150,10 +132,11 @@ def test_mean_tosa_BI(test_data): @common.parametrize("test_data", MeanDim.test_data_suite) @common.XfailIfNoCorstone300 -def test_mean_u55_BI(test_data): +def test_mean_dim_u55_BI(test_data): + test_data, dim, keep_dim = test_data() pipeline = EthosU55PipelineBI[input_t]( - MeanDim(test_data[1], test_data[2]), - (test_data[0],), + MeanDim(dim, keep_dim), + (test_data,), "torch.ops.aten.sum.dim_IntList", # Just check for sum op included in the mean decomposition run_on_fvp=True, ) @@ -163,10 +146,11 @@ def test_mean_u55_BI(test_data): @common.parametrize("test_data", MeanDim.test_data_suite) @common.XfailIfNoCorstone320 -def test_mean_u85_BI(test_data): +def test_mean_dim_u85_BI(test_data): + test_data, dim, keep_dim = test_data() pipeline = EthosU85PipelineBI[input_t]( - MeanDim(test_data[1], test_data[2]), - (test_data[0],), + MeanDim(dim, keep_dim), + (test_data,), "torch.ops.aten.sum.dim_IntList", # Just check for sum op included in the mean decomposition run_on_fvp=True, ) diff --git a/backends/arm/test/ops/test_minimum.py b/backends/arm/test/ops/test_minimum.py index 04693a46435..27922cda5e0 100644 --- a/backends/arm/test/ops/test_minimum.py +++ b/backends/arm/test/ops/test_minimum.py @@ -1,130 +1,75 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. 
-# Copyright 2024-2025 Arm Limited and/or its affiliates. # All rights reserved. +# Copyright 2024-2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest from typing import Tuple import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - - -class TestMinimum(unittest.TestCase): - """Tests a single minimum op""" - - class Minimum(torch.nn.Module): - test_parameters = [ - ( - torch.FloatTensor([1, 2, 3, 5, 7]), - (torch.FloatTensor([2, 1, 2, 1, 10])), - ), - (torch.ones(1, 10, 4, 6), 2 * torch.ones(1, 10, 4, 6)), - (torch.randn(1, 1, 4, 4), torch.ones(1, 1, 4, 1)), - (torch.randn(1, 3, 4, 4), torch.randn(1, 3, 4, 4)), - (10000 * torch.randn(1, 1, 4, 4), torch.randn(1, 1, 4, 1)), - ] - - def __init__(self): - super().__init__() - - def forward(self, x, y): - return torch.minimum(x, y) - - def _test_minimum_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.minimum.default": 1}) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_minimum_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check_count({"torch.ops.aten.minimum.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, qtol=1) - ) - - def _test_minimum_ethos_BI_pipeline( - self, - module: torch.nn.Module, - compile_spec: CompileSpec, - test_data: Tuple[torch.Tensor], - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .to_edge() - .partition() - .to_executorch() - .serialize() - ) - - return tester - - @parameterized.expand(Minimum.test_parameters) - def test_minimum_tosa_MI(self, operand1: torch.Tensor, operand2: torch.Tensor): - test_data = (operand1, operand2) - self._test_minimum_tosa_MI_pipeline(self.Minimum(), test_data) - - @parameterized.expand(Minimum.test_parameters) - def test_minimum_tosa_BI(self, operand1: torch.Tensor, operand2: torch.Tensor): - test_data = (operand1, operand2) - self._test_minimum_tosa_BI_pipeline(self.Minimum(), test_data) - - @parameterized.expand(Minimum.test_parameters) - def test_minimum_u55_BI(self, operand1: torch.Tensor, operand2: torch.Tensor): - test_data = (operand1, operand2) - tester = self._test_minimum_ethos_BI_pipeline( - self.Minimum(), common.get_u55_compile_spec(), test_data - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - @parameterized.expand(Minimum.test_parameters) - def test_minimum_u85_BI(self, operand1: torch.Tensor, operand2: torch.Tensor): - test_data = (operand1, operand2) 
- tester = self._test_minimum_ethos_BI_pipeline( - self.Minimum(), common.get_u85_compile_spec(), test_data - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs( - qtol=1, - inputs=test_data, - ) +from executorch.backends.arm.test import common +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +test_t = tuple[torch.Tensor, torch.Tensor] +aten_op = "torch.ops.aten.minimum.default" + + +class Minimum(torch.nn.Module): + test_parameters = { + "float_tensor": lambda: ( + torch.FloatTensor([1, 2, 3, 5, 7]), + (torch.FloatTensor([2, 1, 2, 1, 10])), + ), + "ones": lambda: (torch.ones(1, 10, 4, 6), 2 * torch.ones(1, 10, 4, 6)), + "rand_diff": lambda: (torch.randn(1, 1, 4, 4), torch.ones(1, 1, 4, 1)), + "rand_same": lambda: (torch.randn(1, 3, 4, 4), torch.randn(1, 3, 4, 4)), + "rand_large": lambda: ( + 10000 * torch.randn(1, 1, 4, 4), + torch.randn(1, 1, 4, 1), + ), + } + + def __init__(self): + super().__init__() + + def forward(self, x, y): + return torch.minimum(x, y) + + +@common.parametrize("test_data", Minimum.test_parameters) +def test_minimum_tosa_MI(test_data: Tuple): + TosaPipelineMI[test_t](Minimum(), test_data(), aten_op).run() + + +@common.parametrize("test_data", Minimum.test_parameters) +def test_minimum_tosa_BI(test_data: Tuple): + TosaPipelineBI[test_t](Minimum(), test_data(), aten_op).run() + + +@common.parametrize("test_data", Minimum.test_parameters) +@common.XfailIfNoCorstone300 +def test_minimum_u55_BI(test_data: Tuple): + EthosU55PipelineBI[test_t]( + Minimum(), + test_data(), + aten_op, + run_on_fvp=True, + ).run() + + +@common.parametrize("test_data", Minimum.test_parameters) +@common.XfailIfNoCorstone320 +def test_minimum_u85_BI(test_data: Tuple): + EthosU85PipelineBI[test_t]( + Minimum(), + test_data(), + aten_op, + run_on_fvp=True, + ).run() diff --git a/backends/arm/test/ops/test_mm.py b/backends/arm/test/ops/test_mm.py index a4503280db9..a5a3b4b98b9 100644 --- a/backends/arm/test/ops/test_mm.py +++ b/backends/arm/test/ops/test_mm.py @@ -4,7 +4,7 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
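The u55/u85 tests above combine run_on_fvp=True with XfailIfNoCorstone300/320 from executorch.backends.arm.test.common, so the FVP-backed tests are always collected but are presumably expected to fail where the corresponding Corstone FVP is not installed. A rough, hypothetical equivalent in plain pytest; the binary name below is an assumption, not taken from this patch:

import shutil

import pytest

XfailIfNoFvp320 = pytest.mark.xfail(
    shutil.which("FVP_Corstone_SSE-320") is None,  # assumed FVP binary name
    reason="Corstone-320 FVP not found on PATH",
    strict=False,
)


@XfailIfNoFvp320
def test_runs_on_fvp():
    # Placeholder body; a real test would build, serialize and run on the FVP.
    ...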
-from typing import Callable +from typing import Tuple import pytest import torch @@ -15,19 +15,18 @@ TosaPipelineBI, TosaPipelineMI, ) -from parameterized import parameterized test_t = tuple[torch.Tensor, torch.Tensor] class MM(torch.nn.Module): - test_data_generators = [ - lambda: (torch.rand(3, 5), torch.rand(5, 2)), - lambda: (torch.rand(1, 1), torch.rand(1, 1)), - lambda: (torch.ones(55, 3), torch.ones(3, 44)), - lambda: (10000 * torch.randn(1, 10), torch.randn(10, 5)), - lambda: (-10 * torch.randn(32, 64), 5 + 5 * torch.randn(64, 32)), - ] + test_data_generators = { + "rand_2d": lambda: (torch.rand(3, 5), torch.rand(5, 2)), + "rand_same": lambda: (torch.rand(1, 1), torch.rand(1, 1)), + "ones": lambda: (torch.ones(55, 3), torch.ones(3, 44)), + "randn_large": lambda: (10000 * torch.randn(1, 10), torch.randn(10, 5)), + "rand_neg": lambda: (-10 * torch.randn(32, 64), 5 + 5 * torch.randn(64, 32)), + } aten_op = "torch.ops.aten.mm.default" exir_op = "executorch_exir_dialects_edge__ops_aten_mm_default" @@ -35,43 +34,35 @@ def forward(self, x, y): return torch.mm(x, y) -@parameterized.expand(MM.test_data_generators) -def test_mm_tosa_MI(test_data_generator: Callable[[], tuple]): - test_data = test_data_generator() - TosaPipelineMI[test_t](MM(), test_data, MM.aten_op).run() +@common.parametrize("test_data", MM.test_data_generators) +def test_mm_tosa_MI(test_data: Tuple): + TosaPipelineMI[test_t](MM(), test_data(), MM.aten_op).run() -@parameterized.expand(MM.test_data_generators) -def test_mm_tosa_BI(test_data_generator: Callable[[], tuple]): - test_data = test_data_generator() - TosaPipelineBI[test_t](MM(), test_data, MM.aten_op, MM.exir_op).run() +@common.parametrize("test_data", MM.test_data_generators) +def test_mm_tosa_BI(test_data: Tuple): + TosaPipelineBI[test_t](MM(), test_data(), MM.aten_op, MM.exir_op).run() -@parameterized.expand(MM.test_data_generators) -def test_mm_tosa_u55(test_data_generator: Callable[[], tuple]): - test_data = test_data_generator() - EthosU55PipelineBI[test_t](MM(), test_data, MM.aten_op).run() - - -@parameterized.expand(MM.test_data_generators) +@common.parametrize("test_data", MM.test_data_generators) +@common.XfailIfNoCorstone300 @pytest.mark.flaky # Investigate flakiness (MLETORCH-870) -def test_mm_tosa_u85(test_data_generator: Callable[[], tuple]): - test_data = test_data_generator() - EthosU85PipelineBI[test_t](MM(), test_data, MM.aten_op, MM.exir_op).run() - - -@parameterized.expand(MM.test_data_generators) -@common.SkipIfNoCorstone300 -def test_mm_tosa_u55_on_fvp(test_data_generator: Callable[[], tuple]): - test_data = test_data_generator() - EthosU55PipelineBI[test_t](MM(), test_data, MM.aten_op, run_on_fvp=True).run() +def test_mm_u55_BI(test_data: Tuple): + EthosU55PipelineBI[test_t]( + MM(), + test_data(), + MM.aten_op, + run_on_fvp=True, + ).run() -@parameterized.expand(MM.test_data_generators) -@common.SkipIfNoCorstone320 -@pytest.mark.flaky # Investigate flakiness (MLETORCH-870) -def test_mm_tosa_u85_on_fvp(test_data_generator: Callable[[], tuple]): - test_data = test_data_generator() +@common.parametrize("test_data", MM.test_data_generators) +@common.XfailIfNoCorstone320 +def test_mm_u85_BI(test_data: Tuple): EthosU85PipelineBI[test_t]( - MM(), test_data, MM.aten_op, MM.exir_op, run_on_fvp=True + MM(), + test_data(), + MM.aten_op, + MM.exir_op, + run_on_fvp=True, ).run() diff --git a/backends/arm/test/ops/test_mul.py b/backends/arm/test/ops/test_mul.py index 739864a4982..f960f348a87 100644 --- a/backends/arm/test/ops/test_mul.py +++ 
b/backends/arm/test/ops/test_mul.py @@ -1,226 +1,155 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. -# Copyright 2024-2025 Arm Limited and/or its affiliates. # All rights reserved. +# Copyright 2024-2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest -import pytest +from typing import Tuple import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.backend_details import CompileSpec -from parameterized import parameterized -test_data_suite = [ +from executorch.backends.arm.test import common +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +input_t1 = Tuple[torch.Tensor, torch.Tensor] # Input x +aten_op = "torch.ops.aten.mul.Tensor" + +test_data_suite = { # (test_name, input, other,) See torch.mul() for info - ( - "op_mul_rank1_rand", + "op_mul_rank1_rand": lambda: ( torch.rand(5) * 3.7, torch.rand(5) * 1.5, ), - ( - "op_mul_rank2_rand", + "op_mul_rank2_rand": lambda: ( torch.rand(4, 5), torch.rand(1, 5), ), - ( - "op_mul_rank3_randn", + "op_mul_rank3_randn": lambda: ( torch.randn(10, 5, 2), torch.randn(10, 5, 2), ), - ( - "op_mul_rank4_randn", + "op_mul_rank4_randn": lambda: ( torch.randn(1, 10, 25, 20), torch.randn(1, 10, 25, 20), ), - ( - "op_mul_rank4_ones_mul_negative", + "op_mul_rank4_ones_mul_negative": lambda: ( torch.ones(1, 10, 25, 20), (-1) * torch.ones(1, 10, 25, 20), ), - ( - "op_mul_rank4_negative_large_rand", + "op_mul_rank4_negative_large_rand": lambda: ( (-200) * torch.rand(1, 10, 25, 20), torch.rand(1, 1, 1, 20), ), - ( - "op_mul_rank4_large_randn", + "op_mul_rank4_large_randn": lambda: ( 200 * torch.randn(1, 10, 25, 20), torch.rand(1, 10, 25, 1), ), -] +} -test_data_suite_2 = [ +test_data_suite_2 = { # (test_name, input, other,) See torch.mul() for info - ( - "op_mul_rank2_rand", + "op_mul_rank2_rand": lambda: ( torch.rand(4, 5), torch.rand(5), ), - ( - "op_mul_rank3_randn", + "op_mul_rank3_randn": lambda: ( torch.randn(10, 5, 2), torch.randn(5, 2), ), - ( - "op_mul_rank4_randn", + "op_mul_rank4_randn": lambda: ( torch.randn(1, 10, 25, 20), torch.randn(1, 25, 20), ), - ( - "op_mul_rank4_randn_2", + "op_mul_rank4_randn_2": lambda: ( torch.randn(1, 25, 1), torch.randn(1, 3, 25, 10), ), -] - - -class TestMul(unittest.TestCase): - class Mul(torch.nn.Module): - - def forward( - self, - input_: torch.Tensor, - other_: torch.Tensor, - ): - return input_ * other_ - - def _test_mul_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: tuple[torch.Tensor, torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec( - "TOSA-0.80+MI", - ), - ) - .export() - .check_count({"torch.ops.aten.mul.Tensor": 1}) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_mul_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: tuple[torch.Tensor, torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec( - "TOSA-0.80+BI", - ), - ) - .quantize() - .export() - .check_count({"torch.ops.aten.mul.Tensor": 1}) - 
.check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, qtol=1.0) - ) - - def _test_mul_ethosu_BI_pipeline( - self, - compile_spec: CompileSpec, - module: torch.nn.Module, - test_data: tuple[torch.Tensor, torch.Tensor], - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.mul.Tensor": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - @parameterized.expand(test_data_suite) - def test_mul_tosa_MI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_mul_tosa_MI_pipeline(self.Mul(), test_data) - - @parameterized.expand(test_data_suite_2) - def test_mul_diff_input_ranks_tosa_MI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_mul_tosa_MI_pipeline(self.Mul(), test_data) +} - @parameterized.expand(test_data_suite_2) - def test_mul_diff_input_ranks_tosa_BI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_mul_tosa_BI_pipeline(self.Mul(), test_data) - @parameterized.expand(test_data_suite) - def test_mul_tosa_BI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - - test_data = (input_, other_) - self._test_mul_tosa_BI_pipeline(self.Mul(), test_data) +class Mul(torch.nn.Module): - @parameterized.expand(test_data_suite) - @pytest.mark.corstone_fvp - def test_mul_u55_BI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_mul_ethosu_BI_pipeline( - common.get_u55_compile_spec(), self.Mul(), test_data - ) - - @parameterized.expand(test_data_suite) - @pytest.mark.corstone_fvp - def test_mul_u85_BI( + def forward( self, - test_name: str, input_: torch.Tensor, other_: torch.Tensor, ): - test_data = (input_, other_) - self._test_mul_ethosu_BI_pipeline( - common.get_u85_compile_spec(), self.Mul(), test_data - ) + return input_ * other_ + + +@common.parametrize("test_data", test_data_suite) +def test_mul_tensor_tosa_MI(test_data: torch.Tensor): + pipeline = TosaPipelineMI[input_t1]( + Mul(), + test_data(), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite_2) +def test_mul_tensor_tosa_MI_diff_input_ranks(test_data: torch.Tensor): + pipeline = TosaPipelineMI[input_t1]( + Mul(), + test_data(), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite_2) +def test_mul_tensor_tosa_BI_diff_input_ranks(test_data: torch.Tensor): + pipeline = TosaPipelineBI[input_t1]( + Mul(), + test_data(), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_mul_tensor_tosa_BI(test_data: torch.Tensor): + pipeline = TosaPipelineBI[input_t1]( + Mul(), + test_data(), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone300 +def test_mul_tensor_u55_BI(test_data: torch.Tensor): + pipeline = 
EthosU55PipelineBI[input_t1]( + Mul(), + test_data(), + aten_op, + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone320 +def test_mul_tensor_u85_BI(test_data: torch.Tensor): + pipeline = EthosU85PipelineBI[input_t1]( + Mul(), + test_data(), + aten_op, + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_ne.py b/backends/arm/test/ops/test_ne.py index 7ab75827cad..2ceacdb31b9 100644 --- a/backends/arm/test/ops/test_ne.py +++ b/backends/arm/test/ops/test_ne.py @@ -126,11 +126,12 @@ def test_ne_tensor_u55_BI(test_module): pipeline = OpNotSupportedPipeline[input_t]( test_module, test_module.get_inputs(), - "TOSA-0.80+BI+u55", { NotEqual.decomposed_exir_ops[0]: 1, NotEqual.decomposed_exir_ops[1]: 1, }, + quantize=True, + u55_subset=True, ) pipeline.run() @@ -143,11 +144,12 @@ def test_ne_scalar_u55_BI(test_module): pipeline = OpNotSupportedPipeline[input_t]( test_module, test_module.get_inputs(), - "TOSA-0.80+BI+u55", { NotEqual.decomposed_exir_ops[0]: 1, NotEqual.decomposed_exir_ops[1]: 1, }, + quantize=True, + u55_subset=True, n_expected_delegates=1, ) pipeline.run() diff --git a/backends/arm/test/ops/test_permute.py b/backends/arm/test/ops/test_permute.py index 50db1231b41..3bbfdb69903 100644 --- a/backends/arm/test/ops/test_permute.py +++ b/backends/arm/test/ops/test_permute.py @@ -5,185 +5,105 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest from typing import Tuple -import pytest - import torch -from executorch.backends.arm.quantizer import ( - EthosUQuantizer, - get_symmetric_quantization_config, - TOSAQuantizer, +from executorch.backends.arm.test import common + +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, ) -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.backends.arm.test.tester.test_pipeline import OpNotSupportedPipeline -from executorch.backends.arm.tosa_specification import TosaSpecification -from executorch.backends.xnnpack.test.tester.tester import Quantize -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized from torchvision.ops import Permute -test_data_suite = [ +input_t1 = Tuple[torch.Tensor] # Input x + +aten_op = "torch.ops.aten.permute.default" +exir_op = "executorch_exir_dialects_edge__ops_aten_permute_default" + +test_data_suite = { # (test_name,test_data,dims) - ("rank_2", torch.rand(10, 10), [1, 0]), - ("rank_3", torch.rand(10, 10, 10), [2, 0, 1]), - ("rank_3", torch.rand(10, 10, 10), [1, 2, 0]), - ("rank_4", torch.rand(1, 5, 1, 10), [0, 2, 3, 1]), - ("rank_4", torch.rand(1, 2, 5, 10), [1, 0, 2, 3]), - ("rank_4", torch.rand(1, 10, 10, 5), [2, 0, 1, 3]), -] - - -class TestPermute(unittest.TestCase): - """Tests Permute Operator.""" - - class Permute(torch.nn.Module): - - def __init__(self, dims: list[int]): - super().__init__() - - self.permute = Permute(dims=dims) - - def forward(self, x): - return self.permute(x) - - def _test_permute_tosa_MI_pipeline( - self, - module: torch.nn.Module, - test_data: Tuple[torch.tensor], - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - 
.check(["torch.ops.aten.permute.default"]) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_permute_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_permute_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - tosa_spec = TosaSpecification.create_from_string("TOSA-0.80+BI") - compile_spec = common.get_tosa_compile_spec(tosa_spec) - quantizer = TOSAQuantizer(tosa_spec).set_io(get_symmetric_quantization_config()) - ( - ArmTester(module, example_inputs=test_data, compile_spec=compile_spec) - .quantize(Quantize(quantizer, get_symmetric_quantization_config())) - .export() - .check_count({"torch.ops.aten.permute.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_permute_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_permute_ethos_BI_pipeline( - self, - module: torch.nn.Module, - compile_spec: CompileSpec, - test_data: Tuple[torch.Tensor], - ): - quantizer = EthosUQuantizer(compile_spec).set_io( - get_symmetric_quantization_config() - ) - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize(Quantize(quantizer, get_symmetric_quantization_config())) - .export() - .check_count({"torch.ops.aten.permute.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_permute_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - @parameterized.expand(test_data_suite) - def test_permute_tosa_MI( - self, test_name: str, test_data: torch.Tensor, dims: list[int] - ): - self._test_permute_tosa_MI_pipeline(self.Permute(dims=dims), (test_data,)) - self._test_permute_tosa_MI_pipeline(self.Permute(dims=dims), (test_data,)) - - @parameterized.expand(test_data_suite) - def test_permute_tosa_BI( - self, test_name: str, test_data: torch.Tensor, dims: list[int] - ): - self._test_permute_tosa_BI_pipeline(self.Permute(dims=dims), (test_data,)) - - # Expected to fail as TOSA.Transpose is not supported by Ethos-U55. - @parameterized.expand(test_data_suite[0:1]) - @pytest.mark.corstone_fvp - def test_permute_u55_BI( - self, test_name: str, test_data: torch.Tensor, dims: list[int] - ): - self._test_permute_ethos_BI_pipeline( - self.Permute(dims=dims), common.get_u55_compile_spec(), (test_data,) - ) - - @parameterized.expand(test_data_suite[:-2]) - @pytest.mark.corstone_fvp - def test_permute_u85_BI( - self, test_name: str, test_data: torch.Tensor, dims: list[int] - ): - self._test_permute_ethos_BI_pipeline( - self.Permute(dims=dims), common.get_u85_compile_spec(), (test_data,) - ) - - # Fails since on FVP since N > 1 is not supported. 
MLETORCH-517 - @parameterized.expand(test_data_suite[-2:]) - @pytest.mark.corstone_fvp - @conftest.expectedFailureOnFVP - def test_permute_u85_BI_xfails( - self, test_name: str, test_data: torch.Tensor, dims: list[int] - ): - self._test_permute_ethos_BI_pipeline( - self.Permute(dims=dims), common.get_u85_compile_spec(), (test_data,) - ) - - -reject_data_suite = { - "int8_r3_axes_product": ([1, 700, 1000], [2, 1, 0], torch.int8), - "int8_r5_axes_product": ([1, 1, 1, 700, 1000], [0, 1, 2, 3, 4], torch.int8), - "int8_r4_NH_too_large": ([700, 100, 1, 1], [0, 1, 3, 2], torch.int8), - "int32_r5_no_support": ([2, 2, 2, 2, 2], [3, 4, 2, 1, 0], torch.int32), + "rank_2": lambda: (torch.rand(10, 10), [1, 0]), + "rank_3": lambda: (torch.rand(10, 10, 10), [2, 0, 1]), + "rank_3_2": lambda: (torch.rand(10, 10, 10), [1, 2, 0]), + "rank_4": lambda: (torch.rand(1, 5, 1, 10), [0, 2, 3, 1]), + "rank_4_2": lambda: (torch.rand(1, 2, 5, 10), [1, 0, 2, 3]), + "rank_4_3": lambda: (torch.rand(1, 10, 10, 5), [2, 0, 1, 3]), +} + + +class SimplePermute(torch.nn.Module): + + def __init__(self, dims: list[int]): + super().__init__() + + self.permute = Permute(dims=dims) + + def forward(self, x): + return self.permute(x) + + +@common.parametrize("test_data", test_data_suite) +def test_permute_tosa_MI(test_data: torch.Tensor): + test_data, dims = test_data() + pipeline = TosaPipelineMI[input_t1]( + SimplePermute(dims=dims), + (test_data,), + aten_op, + exir_op, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_permute_tosa_BI(test_data: torch.Tensor): + test_data, dims = test_data() + pipeline = TosaPipelineBI[input_t1]( + SimplePermute(dims=dims), + (test_data,), + aten_op, + exir_op, + symmetric_io_quantization=True, + ) + pipeline.run() + + +x_fails = { + "rank_4_2": "AssertionError: Output 0 does not match reference output.", + "rank_4_3": "AssertionError: Output 0 does not match reference output.", } -input_t = tuple[torch.Tensor] - - -@common.parametrize("test_data", reject_data_suite) -def test_permute_u55_BI_not_delegated(test_data): - # Tests that we don't delegate these ops since they are not supported on U55. - shape, permutation, dtype = test_data - data = ((torch.rand(shape) * 10).to(dtype),) - pipeline = OpNotSupportedPipeline[input_t]( - TestPermute.Permute(dims=permutation), - data, - "TOSA-0.80+BI+u55", - {"executorch_exir_dialects_edge__ops_aten_permute_copy_default": 1}, + + +@common.parametrize("test_data", test_data_suite, x_fails) +@common.XfailIfNoCorstone300 +def test_permute_u55_BI(test_data): + test_data, dims = test_data() + pipeline = EthosU55PipelineBI[input_t1]( + SimplePermute(dims=dims), + (test_data,), + aten_op, + exir_ops="executorch_exir_dialects_edge__ops_aten_permute_copy_default", + run_on_fvp=True, + symmetric_io_quantization=True, + ) + pipeline.run() + + +# Fails since on FVP since N > 1 is not supported. 
MLETORCH-517 +@common.parametrize("test_data", test_data_suite, x_fails) +@common.XfailIfNoCorstone320 +def test_permute_u85_BI(test_data: torch.Tensor): + test_data, dims = test_data() + pipeline = EthosU85PipelineBI[input_t1]( + SimplePermute(dims=dims), + (test_data,), + aten_op, + exir_ops="executorch_exir_dialects_edge__ops_aten_permute_copy_default", + run_on_fvp=True, + symmetric_io_quantization=True, ) pipeline.run() diff --git a/backends/arm/test/ops/test_pow.py b/backends/arm/test/ops/test_pow.py index 618acf50fc2..98b23870f21 100644 --- a/backends/arm/test/ops/test_pow.py +++ b/backends/arm/test/ops/test_pow.py @@ -81,8 +81,14 @@ def forward(self, x: torch.Tensor): return torch.pow(x, self.exp) -@common.parametrize("test_data", Pow_TensorTensor.test_data) -def test_pow_tensor_tensor_MI(test_data: Pow_TensorTensor.input_t): +x_fail = { + "zero_base_zero_exp": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", + "neg_base_zero_exp": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", +} + + +@common.parametrize("test_data", Pow_TensorTensor.test_data, x_fail, strict=False) +def test_pow_tensor_tensor_tosa_MI(test_data: Pow_TensorTensor.input_t): pipeline = TosaPipelineMI[Pow_TensorTensor.input_t]( Pow_TensorTensor(), test_data(), @@ -92,8 +98,18 @@ def test_pow_tensor_tensor_MI(test_data: Pow_TensorTensor.input_t): pipeline.run() -@common.parametrize("test_data", Pow_TensorScalar.test_data) -def test_pow_tensor_scalar_MI(test_data: Pow_TensorScalar.input_t): +x_fail = { + "exp_minus_three": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", + "exp_minus_one": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", + "exp_zero": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", + "exp_one": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", + "exp_two": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", + "non_neg_base_exp_pos_decimal": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", +} + + +@common.parametrize("test_data", Pow_TensorScalar.test_data, x_fail, strict=False) +def test_pow_tensor_scalar_tosa_MI(test_data: Pow_TensorScalar.input_t): base, exp = test_data() pipeline = TosaPipelineMI[Pow_TensorScalar.input_t]( Pow_TensorScalar(exp), @@ -104,8 +120,8 @@ def test_pow_tensor_scalar_MI(test_data: Pow_TensorScalar.input_t): pipeline.run() -@common.parametrize("test_data", Pow_TensorScalar.test_data) -def test_pow_tensor_scalar_BI(test_data: Pow_TensorScalar.input_t): +@common.parametrize("test_data", Pow_TensorScalar.test_data, x_fail, strict=False) +def test_pow_tensor_scalar_tosa_BI(test_data: Pow_TensorScalar.input_t): base, exp = test_data() pipeline = TosaPipelineBI[Pow_TensorScalar.input_t]( Pow_TensorScalar(exp), diff --git a/backends/arm/test/ops/test_reciprocal.py b/backends/arm/test/ops/test_reciprocal.py index b3233d02a92..92a33346015 100644 --- a/backends/arm/test/ops/test_reciprocal.py +++ b/backends/arm/test/ops/test_reciprocal.py @@ -1,120 +1,91 @@ -# Copyright 2024 Arm Limited and/or its affiliates. -# All rights reserved. +# Copyright 2024-2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
-import unittest -import pytest +from typing import Tuple import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from parameterized import parameterized - -test_data_t = tuple[str, torch.Tensor] -test_data_suite: list[test_data_t] = [ - ( - "op_reciprocal_rank1_ones", - torch.ones(5), - ), - ( - "op_reciprocal_rank1_rand", - torch.rand(5) * 5, - ), - ("op_reciprocal_rank1_negative_ones", torch.ones(5) * (-1)), - ("op_reciprocal_rank4_ones", torch.ones(1, 10, 25, 20)), - ("op_reciprocal_rank4_negative_ones", (-1) * torch.ones(1, 10, 25, 20)), - ("op_reciprocal_rank4_ones_reciprocal_negative", torch.ones(1, 10, 25, 20)), - ("op_reciprocal_rank4_large_rand", 200 * torch.rand(1, 10, 25, 20)), - ("op_reciprocal_rank4_negative_large_rand", (-200) * torch.rand(1, 10, 25, 20)), - ("op_reciprocal_rank4_large_randn", 200 * torch.randn(1, 10, 25, 20) + 1), -] - - -class TestReciprocal(unittest.TestCase): - """Tests reciprocal""" - - class Reciprocal(torch.nn.Module): - - def forward(self, input_: torch.Tensor): - return input_.reciprocal() - - def _test_reciprocal_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.reciprocal.default": 1}) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_reciprocal_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check_count({"torch.ops.aten.reciprocal.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_reciprocal_u55_BI_pipeline( - self, module: torch.nn.Module, test_data: tuple[torch.Tensor] - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_u55_compile_spec(), - ) - .quantize() - .export() - .check_count({"torch.ops.aten.reciprocal.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(qtol=1, inputs=test_data) - - @parameterized.expand(test_data_suite) - def test_reciprocal_tosa_MI(self, test_name: str, input_: torch.Tensor): - test_data = (input_,) - self._test_reciprocal_tosa_MI_pipeline(self.Reciprocal(), test_data) - - @parameterized.expand(test_data_suite) - def test_reciprocal_tosa_BI(self, test_name: str, input_: torch.Tensor): - - test_data = (input_,) - self._test_reciprocal_tosa_BI_pipeline(self.Reciprocal(), test_data) - - @parameterized.expand(test_data_suite) - @pytest.mark.corstone_fvp - def test_reciprocal_u55_BI(self, test_name: str, input_: torch.Tensor): - test_data = (input_,) - self._test_reciprocal_u55_BI_pipeline(self.Reciprocal(), test_data) + +from executorch.backends.arm.test import common + +from 
executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +input_t1 = Tuple[torch.Tensor] # Input x, Input y +aten_op = "torch.ops.aten.reciprocal.default" + +test_data_suite = { + "op_reciprocal_rank1_ones": lambda: torch.ones(5), + "op_reciprocal_rank1_rand": lambda: torch.rand(5) * 5, + "op_reciprocal_rank1_negative_ones": lambda: torch.ones(5) * (-1), + "op_reciprocal_rank4_ones": lambda: torch.ones(1, 10, 25, 20), + "op_reciprocal_rank4_negative_ones": lambda: (-1) * torch.ones(1, 10, 25, 20), + "op_reciprocal_rank4_ones_reciprocal_negative": lambda: torch.ones(1, 10, 25, 20), + "op_reciprocal_rank4_large_rand": lambda: 200 * torch.rand(1, 10, 25, 20), + "op_reciprocal_rank4_negative_large_rand": lambda: (-200) + * torch.rand(1, 10, 25, 20), + "op_reciprocal_rank4_large_randn": lambda: 200 * torch.randn(1, 10, 25, 20) + 1, +} + + +class Reciprocal(torch.nn.Module): + + def forward(self, input_: torch.Tensor): + return input_.reciprocal() + + +@common.parametrize("test_data", test_data_suite) +def test_reciprocal_tosa_MI(test_data: torch.Tensor): + pipeline = TosaPipelineMI[input_t1]( + Reciprocal(), + (test_data(),), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_reciprocal_tosa_BI(test_data: torch.Tensor): + pipeline = TosaPipelineBI[input_t1]( + Reciprocal(), + (test_data(),), + aten_op, + exir_op=[], + symmetric_io_quantization=True, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone300 +def test_reciprocal_u55_BI(test_data: torch.Tensor): + pipeline = EthosU55PipelineBI[input_t1]( + Reciprocal(), + (test_data(),), + aten_op, + exir_ops=[], + run_on_fvp=False, + symmetric_io_quantization=True, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone320 +def test_reciprocal_u85_BI(test_data: torch.Tensor): + pipeline = EthosU85PipelineBI[input_t1]( + Reciprocal(), + (test_data(),), + aten_op, + exir_ops=[], + run_on_fvp=False, + symmetric_io_quantization=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_relu.py b/backends/arm/test/ops/test_relu.py index 3fc64c89be1..e27a65e76da 100644 --- a/backends/arm/test/ops/test_relu.py +++ b/backends/arm/test/ops/test_relu.py @@ -1,134 +1,91 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. -# Copyright 2024-2025 Arm Limited and/or its affiliates. # All rights reserved. +# Copyright 2024-2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
-import unittest from typing import Tuple import torch -from executorch.backends.arm.quantizer import ( - EthosUQuantizer, - get_symmetric_quantization_config, - TOSAQuantizer, -) from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.backends.arm.tosa_specification import TosaSpecification -from executorch.backends.xnnpack.test.tester.tester import Quantize -from executorch.exir.backend.backend_details import CompileSpec -from parameterized import parameterized +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) +input_t1 = Tuple[torch.Tensor] # Input x -test_data_suite = [ +aten_op = "torch.ops.aten.relu.default" +exir_op = "executorch_exir_dialects_edge__ops_aten_relu_default" + +test_data_suite = { # (test_name, test_data) - ("zeros", torch.zeros(1, 10, 10, 10)), - ("ones", torch.ones(10, 10, 10)), - ("rand", torch.rand(10, 10) - 0.5), - ("randn_pos", torch.randn(10) + 10), - ("randn_neg", torch.randn(10) - 10), - ("ramp", torch.arange(-16, 16, 0.2)), -] - - -class TestRelu(unittest.TestCase): - class Relu(torch.nn.Module): - def __init__(self): - super().__init__() - self.relu = torch.nn.ReLU() - - def forward(self, x): - return self.relu(x) - - def _test_relu_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check(["torch.ops.aten.relu.default"]) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_relu_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_relu_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - tosa_spec = TosaSpecification.create_from_string("TOSA-0.80+BI") - compile_spec = common.get_tosa_compile_spec(tosa_spec) - quantizer = TOSAQuantizer(tosa_spec).set_io(get_symmetric_quantization_config()) - ( - ArmTester(module, example_inputs=test_data, compile_spec=compile_spec) - .quantize(Quantize(quantizer, get_symmetric_quantization_config())) - .export() - .check_count({"torch.ops.aten.relu.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_relu_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_relu_ethosu_BI_pipeline( - self, - compile_spec: CompileSpec, - module: torch.nn.Module, - test_data: Tuple[torch.tensor], - ): - quantizer = EthosUQuantizer(compile_spec).set_io( - get_symmetric_quantization_config() - ) - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize(Quantize(quantizer, get_symmetric_quantization_config())) - .export() - .check_count({"torch.ops.aten.relu.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_relu_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - - @parameterized.expand(test_data_suite) - def test_relu_tosa_MI( - self, - test_name: str, - test_data: torch.Tensor, - ): - 
self._test_relu_tosa_MI_pipeline(self.Relu(), (test_data,)) - - @parameterized.expand(test_data_suite) - def test_relu_tosa_BI(self, test_name: str, test_data: torch.Tensor): - self._test_relu_tosa_BI_pipeline(self.Relu(), (test_data,)) - - @parameterized.expand(test_data_suite) - def test_relu_u55_BI(self, test_name: str, test_data: torch.Tensor): - self._test_relu_ethosu_BI_pipeline( - common.get_u55_compile_spec(), self.Relu(), (test_data,) - ) - - @parameterized.expand(test_data_suite) - def test_relu_u85_BI(self, test_name: str, test_data: torch.Tensor): - self._test_relu_ethosu_BI_pipeline( - common.get_u85_compile_spec(), self.Relu(), (test_data,) - ) + "zeros": lambda: torch.zeros(1, 10, 10, 10), + "ones": lambda: torch.ones(10, 10, 10), + "rand": lambda: torch.rand(10, 10) - 0.5, + "randn_pos": lambda: torch.randn(10) + 10, + "randn_neg": lambda: torch.randn(10) - 10, + "ramp": lambda: torch.arange(-16, 16, 0.2), +} + + +class Relu(torch.nn.Module): + def __init__(self): + super().__init__() + self.relu = torch.nn.ReLU() + + def forward(self, x): + return self.relu(x) + + +@common.parametrize("test_data", test_data_suite) +def test_relu_tosa_MI(test_data: torch.Tensor): + pipeline = TosaPipelineMI[input_t1]( + Relu(), + (test_data(),), + aten_op, + exir_op, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_relu_tosa_BI(test_data: torch.Tensor): + pipeline = TosaPipelineBI[input_t1]( + Relu(), + (test_data(),), + aten_op, + exir_op, + symmetric_io_quantization=True, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_relu_u55_BI(test_data: torch.Tensor): + pipeline = EthosU55PipelineBI[input_t1]( + Relu(), + (test_data(),), + aten_op, + exir_op, + run_on_fvp=False, + symmetric_io_quantization=True, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_relu_u85_BI(test_data: torch.Tensor): + pipeline = EthosU85PipelineBI[input_t1]( + Relu(), + (test_data(),), + aten_op, + exir_op, + run_on_fvp=False, + symmetric_io_quantization=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_repeat.py b/backends/arm/test/ops/test_repeat.py index da2770cfafe..3a7a37196ec 100644 --- a/backends/arm/test/ops/test_repeat.py +++ b/backends/arm/test/ops/test_repeat.py @@ -1,5 +1,4 @@ # Copyright 2024-2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
@@ -8,115 +7,83 @@ # Tests the repeat op which copies the data of the input tensor (possibly with new data format) # -import unittest + from typing import Sequence, Tuple import torch -from executorch.backends.arm.quantizer import ( - EthosUQuantizer, - get_symmetric_quantization_config, - TOSAQuantizer, -) from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.backends.arm.tosa_specification import TosaSpecification - -from executorch.backends.xnnpack.test.tester.tester import Quantize -from executorch.exir.backend.backend_details import CompileSpec -from parameterized import parameterized - - -class TestSimpleRepeat(unittest.TestCase): - """Tests Tensor.repeat for different ranks and dimensions.""" - - class Repeat(torch.nn.Module): - # (input tensor, multiples) - test_parameters = [ - (torch.randn(3), (2,)), - (torch.randn(3, 4), (2, 1)), - (torch.randn(1, 1, 2, 2), (1, 2, 3, 4)), - (torch.randn(3), (2, 2)), - (torch.randn(3), (1, 2, 3)), - (torch.randn((3, 3)), (2, 2, 2)), - (torch.randn((3, 3, 3)), (2, 1, 2, 4)), - ] - - def forward(self, x: torch.Tensor, multiples: Sequence): - return x.repeat(multiples) - - def _test_repeat_tosa_MI_pipeline(self, module: torch.nn.Module, test_data: Tuple): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.repeat.default": 1}) - .to_edge() - .partition() - .check_not(["torch.ops.aten.repeat.default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_repeat_tosa_BI_pipeline(self, module: torch.nn.Module, test_data: Tuple): - tosa_spec = TosaSpecification.create_from_string("TOSA-0.80+BI") - compile_spec = common.get_tosa_compile_spec(tosa_spec) - quantizer = TOSAQuantizer(tosa_spec).set_io(get_symmetric_quantization_config()) - ( - ArmTester(module, example_inputs=test_data, compile_spec=compile_spec) - .quantize(Quantize(quantizer, get_symmetric_quantization_config())) - .export() - .check_count({"torch.ops.aten.repeat.default": 1}) - .to_edge() - .partition() - .check_not(["torch.ops.aten.repeat.default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, qtol=1) - ) - - def _test_repeat_ethosu_pipeline( - self, compile_spec: CompileSpec, module: torch.nn.Module, test_data: Tuple - ): - quantizer = EthosUQuantizer(compile_spec).set_io( - get_symmetric_quantization_config() - ) - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize(Quantize(quantizer, get_symmetric_quantization_config())) - .export() - .check_count({"torch.ops.aten.repeat.default": 1}) - .to_edge() - .partition() - .check_not(["torch.ops.aten.repeat.default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - - @parameterized.expand(Repeat.test_parameters) - def test_repeat_tosa_MI(self, test_input, multiples): - self._test_repeat_tosa_MI_pipeline(self.Repeat(), (test_input, multiples)) - - @parameterized.expand(Repeat.test_parameters) - def test_repeat_tosa_BI(self, test_input, multiples): - self._test_repeat_tosa_BI_pipeline(self.Repeat(), (test_input, multiples)) - - @parameterized.expand(Repeat.test_parameters) - def test_repeat_u55_BI(self, test_input, multiples): - self._test_repeat_ethosu_pipeline( 
- common.get_u55_compile_spec(), self.Repeat(), (test_input, multiples) - ) - - @parameterized.expand(Repeat.test_parameters) - def test_repeat_u85_BI(self, test_input, multiples): - self._test_repeat_ethosu_pipeline( - common.get_u85_compile_spec(), self.Repeat(), (test_input, multiples) - ) +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +input_t1 = Tuple[torch.Tensor, torch.Tensor] # Input x, Input y +aten_op = "torch.ops.aten.repeat.default" + + +"""Tests Tensor.repeat for different ranks and dimensions.""" + + +class Repeat(torch.nn.Module): + # (input tensor, multiples) + test_parameters = { + "1_x_1": lambda: (torch.randn(3), (2,)), + "2_x_2": lambda: (torch.randn(3, 4), (2, 1)), + "4_x_4": lambda: (torch.randn(1, 1, 2, 2), (1, 2, 3, 4)), + "1_x_2": lambda: (torch.randn(3), (2, 2)), + "1_x_3": lambda: (torch.randn(3), (1, 2, 3)), + "2_x_3": lambda: (torch.randn((3, 3)), (2, 2, 2)), + "1_x_4": lambda: (torch.randn((3, 3, 3)), (2, 1, 2, 4)), + } + + def forward(self, x: torch.Tensor, multiples: Sequence): + return x.repeat(multiples) + + +@common.parametrize("test_data", Repeat.test_parameters) +def test_repeat_tosa_MI(test_data: Tuple): + pipeline = TosaPipelineMI[input_t1]( + Repeat(), + test_data(), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", Repeat.test_parameters) +def test_repeat_tosa_BI(test_data: Tuple): + pipeline = TosaPipelineBI[input_t1]( + Repeat(), + test_data(), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", Repeat.test_parameters) +def test_repeat_u55_BI(test_data: Tuple): + pipeline = EthosU55PipelineBI[input_t1]( + Repeat(), + test_data(), + aten_op, + exir_ops=[], + run_on_fvp=False, + ) + pipeline.run() + + +@common.parametrize("test_data", Repeat.test_parameters) +def test_repeat_u85_BI(test_data: Tuple): + pipeline = EthosU85PipelineBI[input_t1]( + Repeat(), + test_data(), + aten_op, + exir_ops=[], + run_on_fvp=False, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_rshift.py b/backends/arm/test/ops/test_rshift.py index 52c05d48038..2e11cee5183 100644 --- a/backends/arm/test/ops/test_rshift.py +++ b/backends/arm/test/ops/test_rshift.py @@ -4,6 +4,7 @@ # LICENSE file in the root directory of this source tree. 
import torch +from executorch.backends.arm.test import common from executorch.backends.arm.test.common import ( XfailIfNoCorstone300, XfailIfNoCorstone320, @@ -14,7 +15,6 @@ TosaPipelineBI, TosaPipelineMI, ) -from parameterized import parameterized scalar_input_t = tuple[torch.Tensor, int] @@ -23,11 +23,20 @@ class RshiftScalar(torch.nn.Module): torch_op_MI = "torch.ops.aten.__rshift__.Scalar" torch_op_BI = "torch.ops.aten.bitwise_right_shift.Tensor" exir_op = "executorch_exir_dialects_edge__ops_aten_bitwise_right_shift_Tensor" - test_data = [ - ((torch.randint(-100, 100, (1, 12, 3, 4), dtype=torch.int8), 1),), - ((torch.randint(-100, 100, (1, 5, 3, 4), dtype=torch.int16), 5),), - ((torch.randint(-100, 100, (1, 5, 3, 4), dtype=torch.int32), 2),), - ] + test_data = { + "randint_neg_100_int8": lambda: ( + torch.randint(-100, 100, (1, 12, 3, 4), dtype=torch.int8), + 1, + ), + "randint_neg_100_int16": lambda: ( + torch.randint(-100, 100, (1, 5, 3, 4), dtype=torch.int16), + 5, + ), + "randint_neg_100_int32": lambda: ( + torch.randint(-100, 100, (1, 5, 3, 4), dtype=torch.int32), + 2, + ), + } def forward(self, x: torch.Tensor, shift: int): return x >> shift @@ -39,53 +48,53 @@ def forward(self, x: torch.Tensor, shift: int): class RshiftTensor(torch.nn.Module): torch_op = "torch.ops.aten.bitwise_right_shift.Tensor" exir_op = "executorch_exir_dialects_edge__ops_aten_bitwise_right_shift_Tensor" - test_data = [ - ( - ( - torch.randint(-128, 127, (3, 3), dtype=torch.int8), - torch.randint(0, 5, (3, 3), dtype=torch.int8), - ), + test_data = { + "randint_neg_128_int8": lambda: ( + torch.randint(-128, 127, (3, 3), dtype=torch.int8), + torch.randint(0, 5, (3, 3), dtype=torch.int8), ), - ( - ( - torch.randint(-1024, 1024, (3, 3, 3), dtype=torch.int16), - torch.randint(0, 5, (3, 3, 3), dtype=torch.int16), - ), + "randint_neg_1024_int16": lambda: ( + torch.randint(-1024, 1024, (3, 3, 3), dtype=torch.int16), + torch.randint(0, 5, (3, 3, 3), dtype=torch.int16), ), - ( - ( - torch.randint(0, 127, (1, 2, 3, 3), dtype=torch.int32), - torch.randint(0, 5, (1, 2, 3, 3), dtype=torch.int32), - ), + "randint_0_127_int32": lambda: ( + torch.randint(0, 127, (1, 2, 3, 3), dtype=torch.int32), + torch.randint(0, 5, (1, 2, 3, 3), dtype=torch.int32), ), - ] + } def forward(self, x: torch.Tensor, shift: torch.Tensor): return x.bitwise_right_shift(shift) -@parameterized.expand(RshiftScalar.test_data) -def test_rshift_scalar_tosa_MI(test_data): +@common.parametrize("test_data", RshiftScalar.test_data) +def test_rshift_scalar_tosa_MI_scalar(test_data): TosaPipelineMI[scalar_input_t]( - RshiftScalar(), test_data, RshiftScalar.torch_op_MI, RshiftScalar.exir_op + RshiftScalar(), + test_data(), + RshiftScalar.torch_op_MI, + RshiftScalar.exir_op, ).run() -@parameterized.expand(RshiftScalar.test_data) -def test_rshift_scalar_tosa_BI(test_data): +@common.parametrize("test_data", RshiftScalar.test_data) +def test_bitwise_right_shift_tensor_tosa_BI_scalar(test_data): pipeline = TosaPipelineBI[scalar_input_t]( - RshiftScalar(), test_data, RshiftScalar.torch_op_BI, RshiftScalar.exir_op + RshiftScalar(), + test_data(), + RshiftScalar.torch_op_BI, + RshiftScalar.exir_op, ) pipeline.pop_stage("check.quant_nodes") pipeline.run() -@parameterized.expand(RshiftScalar.test_data) +@common.parametrize("test_data", RshiftScalar.test_data) @XfailIfNoCorstone300 -def test_rshift_scalar_tosa_u55(test_data): +def test_bitwise_right_shift_tensor_u55_BI_scalar(test_data): pipeline = EthosU55PipelineBI[scalar_input_t]( RshiftScalar(), - test_data, + 
test_data(), RshiftScalar.torch_op_BI, RshiftScalar.exir_op, run_on_fvp=True, @@ -93,16 +102,16 @@ def test_rshift_scalar_tosa_u55(test_data): pipeline.pop_stage("check.quant_nodes") # Forced rounding in U55 HW causes off-by-one errors. - pipeline.change_args("run_method_and_compare_outputs", inputs=test_data, atol=1) + pipeline.change_args("run_method_and_compare_outputs", inputs=test_data(), atol=1) pipeline.run() -@parameterized.expand(RshiftScalar.test_data) +@common.parametrize("test_data", RshiftScalar.test_data) @XfailIfNoCorstone320 -def test_rshift_scalar_tosa_u85(test_data): +def test_bitwise_right_shift_tensor_u85_BI_scalar(test_data): pipeline = EthosU85PipelineBI[scalar_input_t]( RshiftScalar(), - test_data, + test_data(), RshiftScalar.torch_op_BI, RshiftScalar.exir_op, run_on_fvp=True, @@ -111,28 +120,34 @@ def test_rshift_scalar_tosa_u85(test_data): pipeline.run() -@parameterized.expand(RshiftTensor.test_data) -def test_rshift_tensor_tosa_MI(test_data): +@common.parametrize("test_data", RshiftTensor.test_data) +def test_rshift_scalar_tosa_MI(test_data): TosaPipelineMI[scalar_input_t]( - RshiftTensor(), test_data, RshiftTensor.torch_op, RshiftTensor.exir_op + RshiftTensor(), + test_data(), + RshiftTensor.torch_op, + RshiftTensor.exir_op, ).run() -@parameterized.expand(RshiftTensor.test_data) -def test_rshift_tensor_tosa_BI(test_data): +@common.parametrize("test_data", RshiftTensor.test_data) +def test_bitwise_right_shift_tensor_tosa_BI(test_data): pipeline = TosaPipelineBI[scalar_input_t]( - RshiftTensor(), test_data, RshiftTensor.torch_op, RshiftTensor.exir_op + RshiftTensor(), + test_data(), + RshiftTensor.torch_op, + RshiftTensor.exir_op, ) pipeline.pop_stage("check.quant_nodes") pipeline.run() -@parameterized.expand(RshiftTensor.test_data) +@common.parametrize("test_data", RshiftTensor.test_data) @XfailIfNoCorstone300 -def test_rshift_tensor_tosa_u55(test_data): +def test_bitwise_right_shift_tensor_u55_BI(test_data): pipeline = EthosU55PipelineBI[scalar_input_t]( RshiftTensor(), - test_data, + test_data(), RshiftTensor.torch_op, RshiftTensor.exir_op, run_on_fvp=True, @@ -140,16 +155,16 @@ def test_rshift_tensor_tosa_u55(test_data): pipeline.pop_stage("check.quant_nodes") # Forced rounding in U55 HW causes off-by-one errors. - pipeline.change_args("run_method_and_compare_outputs", inputs=test_data, atol=1) + pipeline.change_args("run_method_and_compare_outputs", inputs=test_data(), atol=1) pipeline.run() -@parameterized.expand(RshiftTensor.test_data) +@common.parametrize("test_data", RshiftTensor.test_data) @XfailIfNoCorstone320 -def test_rshift_tensor_tosa_u85(test_data): +def test_bitwise_right_shift_tensor_u85_BI(test_data): pipeline = EthosU85PipelineBI[scalar_input_t]( RshiftTensor(), - test_data, + test_data(), RshiftTensor.torch_op, RshiftTensor.exir_op, run_on_fvp=True, diff --git a/backends/arm/test/ops/test_rsqrt.py b/backends/arm/test/ops/test_rsqrt.py index 2bf5fc371c8..0a9e95d890e 100644 --- a/backends/arm/test/ops/test_rsqrt.py +++ b/backends/arm/test/ops/test_rsqrt.py @@ -1,5 +1,4 @@ -# Copyright 2024 Arm Limited and/or its affiliates. -# All rights reserved. +# Copyright 2024-2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. @@ -7,101 +6,78 @@ # Tests the rsqrt op. 
# -import unittest +from typing import Tuple import torch + from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - - -class TestRsqrt(unittest.TestCase): - class Rsqrt(torch.nn.Module): - test_parameters = [ - (torch.ones(1, 10, 10, 10),), - (torch.rand(1, 10, 10, 10),), - (torch.rand(1, 5, 10, 20),), - (torch.rand(5, 10, 20),), - ] - - def forward(self, x: torch.Tensor): - return x.rsqrt() - - def _test_rsqrt_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.rsqrt.default": 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_rsqrt_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check_count({"torch.ops.aten.rsqrt.default": 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_rsqrt_ethosu_BI_pipeline( - self, - compile_spec: CompileSpec, - module: torch.nn.Module, - test_data: tuple[torch.Tensor], - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.rsqrt.default": 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - - @parameterized.expand(Rsqrt.test_parameters) - def test_rsqrt_tosa_MI(self, test_tensor: torch.Tensor): - self._test_rsqrt_tosa_MI_pipeline(self.Rsqrt(), (test_tensor,)) - - @parameterized.expand(Rsqrt.test_parameters) - def test_rsqrt_tosa_BI(self, test_tensor: torch.Tensor): - self._test_rsqrt_tosa_BI_pipeline(self.Rsqrt(), (test_tensor,)) - - @parameterized.expand(Rsqrt.test_parameters) - def test_rsqrt_u55_BI(self, test_tensor: torch.Tensor): - self._test_rsqrt_ethosu_BI_pipeline( - common.get_u55_compile_spec(), self.Rsqrt(), (test_tensor,) - ) - - @parameterized.expand(Rsqrt.test_parameters) - def test_rsqrt_u85_BI(self, test_tensor: torch.Tensor): - self._test_rsqrt_ethosu_BI_pipeline( - common.get_u85_compile_spec(), self.Rsqrt(), (test_tensor,) - ) +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + + +aten_op = "torch.ops.aten.rsqrt.default" +input_t1 = Tuple[torch.Tensor] # Input x + + +class Rsqrt(torch.nn.Module): + test_parameters = { + "ones_4d": lambda: (torch.ones(1, 10, 10, 10),), + "rand_4d_1": lambda: (torch.rand(1, 10, 10, 10),), + "rand_4d_2": lambda: (torch.rand(1, 5, 10, 20),), + "rand_3d": lambda: (torch.rand(5, 10, 20),), + } + + def forward(self, x: torch.Tensor): + return x.rsqrt() + + +@common.parametrize("test_tensor", Rsqrt.test_parameters) +def test_rsqrt_tosa_MI(test_tensor: torch.Tensor): + pipeline = TosaPipelineMI[input_t1]( + Rsqrt(), + test_tensor(), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_tensor", 
Rsqrt.test_parameters) +def test_rsqrt_tosa_BI(test_tensor: torch.Tensor): + pipeline = TosaPipelineBI[input_t1]( + Rsqrt(), + test_tensor(), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_tensor", Rsqrt.test_parameters) +@common.XfailIfNoCorstone300 +def test_rsqrt_u55_BI(test_tensor: torch.Tensor): + pipeline = EthosU55PipelineBI[input_t1]( + Rsqrt(), + test_tensor(), + aten_op, + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_tensor", Rsqrt.test_parameters) +@common.XfailIfNoCorstone320 +def test_rsqrt_u85_BI(test_tensor: torch.Tensor): + pipeline = EthosU85PipelineBI[input_t1]( + Rsqrt(), + test_tensor(), + aten_op, + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_scalar_tensor.py b/backends/arm/test/ops/test_scalar_tensor.py index ad9d385c1d1..7b050f7787e 100644 --- a/backends/arm/test/ops/test_scalar_tensor.py +++ b/backends/arm/test/ops/test_scalar_tensor.py @@ -14,22 +14,22 @@ ) float_test_data_suite = { - "scalar_tensor_float_1": (3.7, torch.float32, torch.rand((1, 2, 3, 4))), - "scalar_tensor_float_2": (66, torch.float32, torch.rand((1, 2, 3))), + "scalar_tensor_float_1": lambda: (3.7, torch.float32, torch.rand((1, 2, 3, 4))), + "scalar_tensor_float_2": lambda: (66, torch.float32, torch.rand((1, 2, 3))), } int_test_data_suite = { - "scalar_tensor_int32": ( + "scalar_tensor_int32": lambda: ( 33, torch.int32, torch.randint(0, 10, (1, 2), dtype=torch.int32), ), - "scalar_tensor_int8": ( + "scalar_tensor_int8": lambda: ( 8, torch.int8, torch.rand(1, 2, 3), ), - "scalar_tensor_int16": ( + "scalar_tensor_int16": lambda: ( 16 * 16 * 16, torch.int16, torch.rand((1,)).unsqueeze(0), # Rank 0 inputs not supported @@ -49,17 +49,29 @@ def forward(self, x: torch.Tensor): return torch.scalar_tensor(self.scalar, dtype=self.dtype) + x -@common.parametrize("test_data", int_test_data_suite | float_test_data_suite) +@common.parametrize( + "test_data", + int_test_data_suite | float_test_data_suite, +) def test_scalar_tensor_tosa_MI(test_data): # Note TOSA MI supports all types - scalar, dtype, data = test_data - TosaPipelineMI(ScalarTensor(scalar, dtype), tuple(data), ScalarTensor.aten_op).run() + scalar, dtype, data = test_data() + TosaPipelineMI( + ScalarTensor(scalar, dtype), + tuple(data), + ScalarTensor.aten_op, + ).run() -@common.parametrize("test_data", int_test_data_suite | float_test_data_suite) +@common.parametrize( + "test_data", + int_test_data_suite | float_test_data_suite, +) def test_scalar_tensor_tosa_BI(test_data): - scalar, dtype, data = test_data + scalar, dtype, data = test_data() pipeline: TosaPipelineBI = TosaPipelineBI( - ScalarTensor(scalar, dtype), tuple(data), ScalarTensor.aten_op + ScalarTensor(scalar, dtype), + tuple(data), + ScalarTensor.aten_op, ) pipeline.pop_stage("check.quant_nodes") pipeline.run() @@ -67,8 +79,8 @@ def test_scalar_tensor_tosa_BI(test_data): @common.parametrize("test_data", float_test_data_suite) @common.XfailIfNoCorstone300 -def test_scalar_tensor_tosa_u55(test_data): - scalar, dtype, data = test_data +def test_scalar_tensor_u55_BI(test_data): + scalar, dtype, data = test_data() EthosU55PipelineBI( ScalarTensor(scalar, dtype), tuple(data), @@ -80,8 +92,8 @@ def test_scalar_tensor_tosa_u55(test_data): @common.parametrize("test_data", float_test_data_suite) @common.XfailIfNoCorstone320 -def test_scalar_tensor_tosa_u85(test_data): - scalar, dtype, data = test_data +def test_scalar_tensor_u85_BI(test_data): + scalar, dtype, data = test_data() 
EthosU85PipelineBI( ScalarTensor(scalar, dtype), tuple(data), diff --git a/backends/arm/test/ops/test_scalars.py b/backends/arm/test/ops/test_scalars.py index 97af070120b..a4748e93fdb 100644 --- a/backends/arm/test/ops/test_scalars.py +++ b/backends/arm/test/ops/test_scalars.py @@ -3,13 +3,12 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest from typing import Tuple -import common import torch +from executorch.backends.arm.test import common from executorch.backends.arm.test.tester.test_pipeline import ( TosaPipelineBI, TosaPipelineMI, @@ -32,90 +31,103 @@ input_t1 = Tuple[torch.Tensor, torch.scalar_tensor] # Input x, Input y -class TestScalars(unittest.TestCase): - """Tests various scalar cases""" +"""Tests various scalar cases""" - class Add(torch.nn.Module): - def forward(self, x, y): - return x + y - class Sub(torch.nn.Module): - def forward(self, x, y): - return x - y +class Add(torch.nn.Module): + def forward(self, x, y): + return x + y - class Div(torch.nn.Module): - def forward(self, x, y): - return x / y - class Mul(torch.nn.Module): - def forward(self, x, y): - return x * y +class Sub(torch.nn.Module): + def forward(self, x, y): + return x - y - class MulScalar(torch.nn.Module): - def forward(self, x, y): - return torch.ops.aten.mul.Scalar(x, y) - class DivScalar(torch.nn.Module): - def forward(self, x, y): - return torch.ops.aten.div.Scalar(x, y) +class Div(torch.nn.Module): + def forward(self, x, y): + return x / y - class AddScalar(torch.nn.Module): - def forward(self, x, y): - return torch.ops.aten.add.Scalar(x, y) - class SubScalar(torch.nn.Module): - def forward(self, x, y): - return torch.ops.aten.sub.Scalar(x, y) +class Mul(torch.nn.Module): + def forward(self, x, y): + return x * y - class AddInplace(torch.nn.Module): - def forward(self, x, y): - x += y - return x - class SubInplace(torch.nn.Module): - def forward(self, x, y): - x -= y - return x +class MulScalar(torch.nn.Module): + def forward(self, x, y): + return torch.ops.aten.mul.Scalar(x, y) - class DivInplace(torch.nn.Module): - def forward(self, x, y): - x /= y - return x - class MulInplace(torch.nn.Module): - def forward(self, x, y): - x *= y - return x +class DivScalar(torch.nn.Module): + def forward(self, x, y): + return torch.ops.aten.div.Scalar(x, y) - class AddConst(torch.nn.Module): - def forward(self, x): - x = 1.0 + x - return x - class ShiftInplaceSub(torch.nn.Module): - def forward(self, x): - x = x >> 4 - x -= 10 - return x +class AddScalar(torch.nn.Module): + def forward(self, x, y): + return torch.ops.aten.add.Scalar(x, y) + + +class SubScalar(torch.nn.Module): + def forward(self, x, y): + return torch.ops.aten.sub.Scalar(x, y) + + +class AddInplace(torch.nn.Module): + def forward(self, x, y): + x += y + return x + + +class SubInplace(torch.nn.Module): + def forward(self, x, y): + x -= y + return x + + +class DivInplace(torch.nn.Module): + def forward(self, x, y): + x /= y + return x + + +class MulInplace(torch.nn.Module): + def forward(self, x, y): + x *= y + return x + + +class AddConst(torch.nn.Module): + def forward(self, x): + x = 1.0 + x + return x + + +class ShiftInplaceSub(torch.nn.Module): + def forward(self, x): + x = x >> 4 + x -= 10 + return x # Inplace ops end with '_' (from aten naming) ops = [ - ("Add", TestScalars.Add()), - ("Sub", TestScalars.Sub()), - ("Mul", TestScalars.Mul()), - ("Div", TestScalars.Div()), - ("Add_", TestScalars.AddInplace()), - ("Sub_", TestScalars.SubInplace()), 
- ("Mul_", TestScalars.MulInplace()), - ("Div_", TestScalars.DivInplace()), - ("MulScalar", TestScalars.MulScalar()), - ("DivScalar", TestScalars.DivScalar()), - ("AddScalar", TestScalars.AddScalar()), - ("SubScalar", TestScalars.SubScalar()), + ("Add", Add()), + ("Sub", Sub()), + ("Mul", Mul()), + ("Div", Div()), + ("Add_", AddInplace()), + ("Sub_", SubInplace()), + ("Mul_", MulInplace()), + ("Div_", DivInplace()), + ("MulScalar", MulScalar()), + ("DivScalar", DivScalar()), + ("AddScalar", AddScalar()), + ("SubScalar", SubScalar()), ] -const_ops = [("Add", TestScalars.AddConst())] +const_ops = [("Add", AddConst())] dtypes = [("int", 3), ("float", 3.0)] sizes = [("r1", (1)), ("r4", (2, 4, 5, 3))] @@ -198,16 +210,18 @@ def _test_add_tosa_BI_pipeline( } -@common.parametrize("tensor_scalar_tests", tensor_scalar_tests, MI_xfails) -def test_MI(tensor_scalar_tests: list): +@common.parametrize( + "tensor_scalar_tests", + tensor_scalar_tests, + MI_xfails, +) +def test_tosa_MI(tensor_scalar_tests: list): op, x, y = tensor_scalar_tests _test_add_tosa_MI_pipeline(op, (x, y)) def _test_passes_tosa_BI_pipeline(module: torch.nn.Module, test_data: tuple): - pipeline = TransformAnnotationPassPipeline[input_t1]( - module, test_data, tosa_version="TOSA-0.80+BI" - ) + pipeline = TransformAnnotationPassPipeline[input_t1](module, test_data) pipeline.run() @@ -221,39 +235,47 @@ def _test_passes_tosa_BI_pipeline(module: torch.nn.Module, test_data: tuple): @common.parametrize( - "tensor_scalar_tests", tensor_scalar_tests, passes_xfails, strict=False + "tensor_scalar_tests", + tensor_scalar_tests, + passes_xfails, + strict=False, ) -def test_passes_BI(tensor_scalar_tests: list): +def test_scalars_tosa_BI_passes(tensor_scalar_tests: list): op, x, y = tensor_scalar_tests _test_passes_tosa_BI_pipeline(op, (x, y)) # op(Scalar float, tensor) works if the scalar is constant. @common.parametrize("tensor_const_tests", tensor_const_tests) -def test_MI_const(tensor_const_tests: list): +def test_scalars_tosa_MI(tensor_const_tests: list): op, x = tensor_const_tests _test_add_tosa_MI_pipeline(op, (x,)) @common.parametrize("tensor_scalar_tests", tensor_scalar_tests) -def test_BI(tensor_scalar_tests: list): +def test_scalars_tosa_BI(tensor_scalar_tests: list): op, x, y = tensor_scalar_tests _test_add_tosa_BI_pipeline(op, (x, y)) # op(Scalar float, tensor) works if the scalar is constant. @common.parametrize("tensor_const_tests", tensor_const_tests) -def test_BI_const(tensor_const_tests: list): +def test_scalars_tosa_BI_const(tensor_const_tests: list): op, x = tensor_const_tests _test_add_tosa_BI_pipeline(op, (x,)) def test_shift_sub_inplace_tosa_MI(): - _test_add_tosa_MI_pipeline(TestScalars.ShiftInplaceSub(), (torch.IntTensor(5),)) + _test_add_tosa_MI_pipeline( + ShiftInplaceSub(), + (torch.IntTensor(5),), + ) # Do not check for quant nodes in the graph for rshift. def test_shift_sub_inplace_tosa_BI(): _test_add_tosa_BI_pipeline( - TestScalars.ShiftInplaceSub(), (torch.IntTensor(5),), check_quant_nodes=False + ShiftInplaceSub(), + (torch.IntTensor(5),), + check_quant_nodes=False, ) diff --git a/backends/arm/test/ops/test_select.py b/backends/arm/test/ops/test_select.py index fbeb4ebf9e7..a0b72942d44 100644 --- a/backends/arm/test/ops/test_select.py +++ b/backends/arm/test/ops/test_select.py @@ -1,182 +1,157 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. # Copyright 2024-2025 Arm Limited and/or its affiliates. -# All rights reserved. 
# # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest + +from typing import Tuple import torch from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) -test_data_t = tuple[torch.Tensor, int, int] +input_t1 = Tuple[torch.Tensor, int, int] -test_data_suite: list[tuple[test_data_t]] = [ +test_data_suite = { # (test_data, dim, index) - ((torch.zeros(5, 3, 20), -1, 0),), - ((torch.rand(5, 3, 20), 0, -1),), - ((torch.zeros(5, 3, 20), 0, 4),), - ((torch.ones(10, 10, 10), 0, 2),), - ((torch.rand(5, 3, 20, 2), 0, 2),), - ((torch.rand(10, 10) - 0.5, 0, 0),), - ((torch.randn(10) + 10, 0, 1),), - ((torch.randn(10) - 10, 0, 2),), - ((torch.arange(-16, 16, 0.2), 0, 1),), -] - - -class TestSelect(unittest.TestCase): - class SelectCopy(torch.nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x, dim: int, index: int): - return torch.select_copy(x, dim=dim, index=index) - - class SelectInt(torch.nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x, dim: int, index: int): - return torch.select(x, dim=dim, index=index) - - def _test_select_tosa_MI_pipeline( - self, - module: torch.nn.Module, - test_data: test_data_t, - export_target: str, - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check([export_target]) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_select_tosa_BI_pipeline( - self, - module: torch.nn.Module, - test_data: test_data_t, - export_target: str, - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check([export_target]) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_select_ethos_BI_pipeline( - self, - compile_spec: list[CompileSpec], - module: torch.nn.Module, - test_data: test_data_t, - export_target: str, - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check([export_target]) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - - def _test_select_tosa_u55_BI_pipeline( - self, module: torch.nn.Module, test_data: test_data_t, export_target: str - ): - self._test_select_ethos_BI_pipeline( - common.get_u55_compile_spec(), - module, - test_data, - export_target, - ) - - def _test_select_tosa_u85_BI_pipeline( - self, module: torch.nn.Module, test_data: test_data_t, export_target: str - ): - self._test_select_ethos_BI_pipeline( - common.get_u85_compile_spec(), - module, - test_data, - export_target, - ) - - @parameterized.expand(test_data_suite) - def test_select_copy_tosa_MI(self, test_data: 
test_data_t): - self._test_select_tosa_MI_pipeline( - self.SelectCopy(), test_data, export_target="torch.ops.aten.select_copy.int" - ) - - @parameterized.expand(test_data_suite) - def test_select_int_tosa_MI(self, test_data: test_data_t): - self._test_select_tosa_MI_pipeline( - self.SelectInt(), test_data, export_target="torch.ops.aten.select.int" - ) - - @parameterized.expand(test_data_suite) - def test_select_copy_tosa_BI(self, test_data: test_data_t): - self._test_select_tosa_BI_pipeline( - self.SelectCopy(), test_data, export_target="torch.ops.aten.select_copy.int" - ) - - @parameterized.expand(test_data_suite) - def test_select_int_tosa_BI(self, test_data: test_data_t): - self._test_select_tosa_BI_pipeline( - self.SelectInt(), test_data, export_target="torch.ops.aten.select.int" - ) - - @parameterized.expand(test_data_suite) - def test_select_copy_tosa_u55_BI(self, test_data: test_data_t): - self._test_select_tosa_u55_BI_pipeline( - self.SelectCopy(), test_data, export_target="torch.ops.aten.select_copy.int" - ) - - @parameterized.expand(test_data_suite) - def test_select_int_tosa_u55_BI(self, test_data: test_data_t): - self._test_select_tosa_u55_BI_pipeline( - self.SelectInt(), test_data, export_target="torch.ops.aten.select.int" - ) - - @parameterized.expand(test_data_suite) - def test_select_copy_tosa_u85_BI(self, test_data: test_data_t): - self._test_select_tosa_u85_BI_pipeline( - self.SelectCopy(), test_data, export_target="torch.ops.aten.select_copy.int" - ) - - @parameterized.expand(test_data_suite) - def test_select_int_tosa_u85_BI(self, test_data: test_data_t): - self._test_select_tosa_u85_BI_pipeline( - self.SelectInt(), test_data, export_target="torch.ops.aten.select.int" - ) + "select3d_neg_1_dim_0_index": lambda: (torch.zeros(5, 3, 20), -1, 0), + "select3d_0_dim_neg_1_index": lambda: (torch.rand(5, 3, 20), 0, -1), + "select3d_0_dim_4_index": lambda: (torch.zeros(5, 3, 20), 0, 4), + "select3d_0_dim_2_index": lambda: (torch.ones(10, 10, 10), 0, 2), + "select4d_0_dim_2_index": lambda: (torch.rand(5, 3, 20, 2), 0, 2), + "select2d_0_dim_0_index": lambda: (torch.rand(10, 10) - 0.5, 0, 0), + "select1d_0_dim_1_index": lambda: (torch.randn(10) + 10, 0, 1), + "select1d_0_dim_0_index": lambda: (torch.randn(10) - 10, 0, 2), + "select3d_0_dim_1_index": lambda: (torch.arange(-16, 16, 0.2), 0, 1), +} + +aten_op_copy = "torch.ops.aten.select_copy.int" +aten_op_int = "torch.ops.aten.select.int" + + +class SelectCopy(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x, dim: int, index: int): + return torch.select_copy(x, dim=dim, index=index) + + +class SelectInt(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x, dim: int, index: int): + return torch.select(x, dim=dim, index=index) + + +@common.parametrize("test_data", test_data_suite) +def test_select_int_tosa_MI_copy(test_data: Tuple): + pipeline = TosaPipelineMI[input_t1]( + SelectCopy(), + test_data(), + aten_op=aten_op_copy, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_select_int_tosa_MI(test_data: Tuple): + pipeline = TosaPipelineMI[input_t1]( + SelectInt(), + test_data(), + aten_op=aten_op_int, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_select_int_tosa_BI_copy(test_data: Tuple): + pipeline = TosaPipelineBI[input_t1]( + SelectCopy(), + test_data(), + aten_op=aten_op_copy, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", 
test_data_suite)
+def test_select_int_tosa_BI(test_data: Tuple):
+    pipeline = TosaPipelineBI[input_t1](
+        SelectInt(),
+        test_data(),
+        aten_op=aten_op_int,
+        exir_op=[],
+    )
+    pipeline.run()
+
+
+x_fails = {
+    "select4d_0_dim_2_index": "AssertionError: Output 0 does not match reference output."
+}
+
+
+@common.parametrize("test_data", test_data_suite, x_fails)
+@common.XfailIfNoCorstone300
+def test_select_int_u55_BI_copy(test_data: Tuple):
+    pipeline = EthosU55PipelineBI[input_t1](
+        SelectCopy(),
+        test_data(),
+        aten_op_copy,
+        exir_ops=[],
+        run_on_fvp=True,
+        use_to_edge_transform_and_lower=True,
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", test_data_suite, x_fails)
+@common.XfailIfNoCorstone300
+def test_select_int_u55_BI(test_data: Tuple):
+    pipeline = EthosU55PipelineBI[input_t1](
+        SelectInt(),
+        test_data(),
+        aten_op_int,
+        exir_ops=[],
+        run_on_fvp=True,
+        use_to_edge_transform_and_lower=True,
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", test_data_suite, x_fails)
+@common.XfailIfNoCorstone320
+def test_select_int_u85_BI_copy(test_data: Tuple):
+    pipeline = EthosU85PipelineBI[input_t1](
+        SelectCopy(),
+        test_data(),
+        aten_op_copy,
+        exir_ops=[],
+        run_on_fvp=True,
+        use_to_edge_transform_and_lower=True,
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", test_data_suite, x_fails)
+@common.XfailIfNoCorstone320
+def test_select_int_u85_BI(test_data: Tuple):
+    pipeline = EthosU85PipelineBI[input_t1](
+        SelectInt(),
+        test_data(),
+        aten_op_int,
+        exir_ops=[],
+        run_on_fvp=True,
+        use_to_edge_transform_and_lower=True,
+    )
+    pipeline.run()
diff --git a/backends/arm/test/ops/test_sigmoid.py b/backends/arm/test/ops/test_sigmoid.py
index 43b4abd2039..b5ee68b987b 100644
--- a/backends/arm/test/ops/test_sigmoid.py
+++ b/backends/arm/test/ops/test_sigmoid.py
@@ -5,189 +5,158 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
-import unittest
 from typing import Tuple

-import pytest
-
 import torch

 from executorch.backends.arm.test import common, conftest
-from executorch.backends.arm.test.tester.arm_tester import ArmTester
-from executorch.exir.backend.compile_spec_schema import CompileSpec
-from parameterized import parameterized
+from executorch.backends.arm.test.tester.test_pipeline import (
+    EthosU55PipelineBI,
+    EthosU85PipelineBI,
+    TosaPipelineBI,
+    TosaPipelineMI,
+)
+
+aten_op = "torch.ops.aten.sigmoid.default"  # Used for checking that we do not have sigmoid in the graph after decompose
+exir_op = "executorch_exir_dialects_edge__ops_aten_sigmoid_default"
+input_t1 = Tuple[torch.Tensor]  # Input x
+
+test_data_suite = {
+    # (test_name, test_data)
+    "zeros": lambda: torch.zeros(10, 10, 10, 10),
+    "ones": lambda: torch.ones(10, 10, 10),
+    "rand": lambda: torch.rand(10, 10) - 0.5,
+    "randn_pos": lambda: torch.randn(10) + 10,
+    "randn_neg": lambda: torch.randn(10) - 10,
+    "ramp": lambda: torch.arange(-16, 16, 0.2),
+}

-test_data_suite = [
-    # (test_name, test_data)
-    ("zeros", torch.zeros(10, 10, 10, 10)),
-    ("ones", torch.ones(10, 10, 10)),
-    ("rand", torch.rand(10, 10) - 0.5),
-    ("randn_pos", torch.randn(10) + 10),
-    ("randn_neg", torch.randn(10) - 10),
-    ("ramp", torch.arange(-16, 16, 0.2)),
-]
-
-
-class TestSigmoid(unittest.TestCase):
-    class Sigmoid(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-            self.sigmoid = torch.nn.Sigmoid()
-
-        def forward(self, x):
-            return self.sigmoid(x)
-
-    class AddSigmoid(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-            self.sigmoid = torch.nn.Sigmoid()
-
-        def forward(self, x):
-            return self.sigmoid(x + x)
-
-    class SigmoidAdd(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-            self.sigmoid = torch.nn.Sigmoid()
-
-        def forward(self, x):
-            return x + self.sigmoid(x)
-
-    class SigmoidAddSigmoid(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-            self.sigmoid = torch.nn.Sigmoid()
-
-        def forward(self, x, y):
-            return self.sigmoid((self.sigmoid(y) + self.sigmoid(x)))
-
-    def _test_sigmoid_tosa_MI_pipeline(
-        self, module: torch.nn.Module, test_data: Tuple[torch.tensor]
-    ):
-        tester = (
-            ArmTester(
-                module,
-                example_inputs=test_data,
-                compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"),
-            )
-            .export()
-            .check(["torch.ops.aten.sigmoid.default"])
-            .check_not(["torch.ops.quantized_decomposed"])
-            .to_edge()
-            .partition()
-            .check_not(["executorch_exir_dialects_edge__ops_aten_sigmoid_default"])
-            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
-            .to_executorch()
-        )
-
-        if conftest.is_option_enabled("tosa_ref_model"):
-            tester.run_method_and_compare_outputs(inputs=test_data)
-
-    def _test_sigmoid_tosa_BI_pipeline(self, module: torch.nn.Module, test_data: Tuple):
-        tester = (
-            ArmTester(
-                module,
-                example_inputs=test_data,
-                compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"),
-            )
-            .quantize()
-            .export()
-            .check(["torch.ops.aten.sigmoid.default"])
-            .check(["torch.ops.quantized_decomposed"])
-            .to_edge()
-            .partition()
-            .check_not(["executorch_exir_dialects_edge__ops_aten_sigmoid_default"])
-            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
-            .to_executorch()
-        )
-
-        if conftest.is_option_enabled("tosa_ref_model"):
-            tester.run_method_and_compare_outputs(inputs=test_data)
-
-    def _test_sigmoid_tosa_ethos_BI_pipeline(
-        self,
-        compile_spec: list[CompileSpec],
-        module: torch.nn.Module,
-        test_data: Tuple[torch.tensor],
-    ):
-        (
-            ArmTester(
-                module,
example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.sigmoid.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_sigmoid_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - - def _test_sigmoid_tosa_u55_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - self._test_sigmoid_tosa_ethos_BI_pipeline( - common.get_u55_compile_spec(), module, test_data - ) - - def _test_sigmoid_tosa_u85_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - self._test_sigmoid_tosa_ethos_BI_pipeline( - common.get_u85_compile_spec(), module, test_data - ) - - @parameterized.expand(test_data_suite) - @pytest.mark.tosa_ref_model - def test_sigmoid_tosa_MI( - self, - test_name: str, - test_data: torch.Tensor, - ): - self._test_sigmoid_tosa_MI_pipeline(self.Sigmoid(), (test_data,)) - - @parameterized.expand(test_data_suite) - @pytest.mark.tosa_ref_model - def test_sigmoid_tosa_BI(self, test_name: str, test_data: torch.Tensor): - self._test_sigmoid_tosa_BI_pipeline(self.Sigmoid(), (test_data,)) - - @pytest.mark.tosa_ref_model - def test_add_sigmoid_tosa_MI(self): - self._test_sigmoid_tosa_MI_pipeline(self.AddSigmoid(), (test_data_suite[0][1],)) - - @pytest.mark.tosa_ref_model - def test_add_sigmoid_tosa_BI(self): - self._test_sigmoid_tosa_BI_pipeline(self.AddSigmoid(), (test_data_suite[5][1],)) - - @pytest.mark.tosa_ref_model - def test_sigmoid_add_tosa_MI(self): - self._test_sigmoid_tosa_MI_pipeline(self.SigmoidAdd(), (test_data_suite[0][1],)) - - @pytest.mark.tosa_ref_model - def test_sigmoid_add_tosa_BI(self): - self._test_sigmoid_tosa_BI_pipeline(self.SigmoidAdd(), (test_data_suite[0][1],)) - - @pytest.mark.tosa_ref_model - def test_sigmoid_add_sigmoid_tosa_MI(self): - self._test_sigmoid_tosa_MI_pipeline( - self.SigmoidAddSigmoid(), (test_data_suite[4][1], test_data_suite[3][1]) - ) - - @pytest.mark.tosa_ref_model - def test_sigmoid_add_sigmoid_tosa_BI(self): - self._test_sigmoid_tosa_BI_pipeline( - self.SigmoidAddSigmoid(), (test_data_suite[4][1], test_data_suite[3][1]) - ) - - @parameterized.expand(test_data_suite) - def test_sigmoid_tosa_u55_BI(self, test_name: str, test_data: torch.Tensor): - self._test_sigmoid_tosa_u55_BI_pipeline(self.Sigmoid(), (test_data,)) - - @parameterized.expand(test_data_suite) - def test_sigmoid_tosa_u85_BI(self, test_name: str, test_data: torch.Tensor): - self._test_sigmoid_tosa_u85_BI_pipeline(self.Sigmoid(), (test_data,)) +class Sigmoid(torch.nn.Module): + def __init__(self): + super().__init__() + self.sigmoid = torch.nn.Sigmoid() + + def forward(self, x): + return self.sigmoid(x) + + +class AddSigmoid(torch.nn.Module): + def __init__(self): + super().__init__() + self.sigmoid = torch.nn.Sigmoid() + + def forward(self, x): + return self.sigmoid(x + x) + + +class SigmoidAdd(torch.nn.Module): + def __init__(self): + super().__init__() + self.sigmoid = torch.nn.Sigmoid() + + def forward(self, x): + return x + self.sigmoid(x) + + +class SigmoidAddSigmoid(torch.nn.Module): + def __init__(self): + super().__init__() + self.sigmoid = torch.nn.Sigmoid() + + def forward(self, x, y): + return self.sigmoid((self.sigmoid(y) + self.sigmoid(x))) + + +@common.parametrize("test_data", test_data_suite) +def test_sigmoid_tosa_MI(test_data: torch.Tensor): + TosaPipelineMI[input_t1](Sigmoid(), (test_data(),), aten_op, exir_op).run() + + 
+@common.parametrize("test_data", test_data_suite) +def test_sigmoid_tosa_BI(test_data: torch.Tensor): + TosaPipelineBI[input_t1](Sigmoid(), (test_data(),), aten_op, exir_op).run() + + +def test_sigmoid_tosa_MI_add(): + TosaPipelineMI[input_t1]( + AddSigmoid(), + (test_data_suite["zeros"](),), + aten_op, + exir_op, + tosa_version=conftest.get_option("tosa_version"), + ).run() + + +def test_sigmoid_tosa_BI_add(): + TosaPipelineBI[input_t1]( + AddSigmoid(), + (test_data_suite["ramp"](),), + aten_op, + exir_op, + tosa_version=conftest.get_option("tosa_version"), + ).run() + + +def test_sigmoid_tosa_MI_add_2(): + TosaPipelineMI[input_t1]( + SigmoidAdd(), + (test_data_suite["zeros"](),), + aten_op, + exir_op, + tosa_version=conftest.get_option("tosa_version"), + ).run() + + +def test_sigmoid_tosa_BI_add_2(): + TosaPipelineBI[input_t1]( + SigmoidAdd(), + (test_data_suite["zeros"](),), + aten_op, + exir_op, + tosa_version=conftest.get_option("tosa_version"), + ).run() + + +def test_sigmoid_tosa_MI_add_3(): + TosaPipelineMI[input_t1]( + SigmoidAddSigmoid(), + (test_data_suite["randn_neg"](), test_data_suite["randn_pos"]()), + aten_op, + exir_op, + tosa_version=conftest.get_option("tosa_version"), + ).run() + + +def test_sigmoid_tosa_BI_3(): + TosaPipelineBI[input_t1]( + SigmoidAddSigmoid(), + (test_data_suite["randn_neg"](), test_data_suite["randn_pos"]()), + aten_op, + exir_op, + tosa_version=conftest.get_option("tosa_version"), + ).run() + + +@common.parametrize("test_data", test_data_suite) +def test_sigmoid_u55_BI(test_data: Tuple): + pipeline = EthosU55PipelineBI[input_t1]( + Sigmoid(), + (test_data(),), + aten_op, + exir_op, + run_on_fvp=False, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_sigmoid_u85_BI(test_data: Tuple): + pipeline = EthosU85PipelineBI[input_t1]( + Sigmoid(), + (test_data(),), + aten_op, + exir_op, + run_on_fvp=False, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_sigmoid_16bit.py b/backends/arm/test/ops/test_sigmoid_16bit.py index 3cd11699a0a..ddec8c61eb9 100644 --- a/backends/arm/test/ops/test_sigmoid_16bit.py +++ b/backends/arm/test/ops/test_sigmoid_16bit.py @@ -11,12 +11,13 @@ TOSAQuantizer, ) from executorch.backends.arm.quantizer.quantization_config import QuantizationConfig -from executorch.backends.arm.test import common +from executorch.backends.arm.test import common, conftest from executorch.backends.arm.test.tester.test_pipeline import ( EthosU85PipelineBI, OpNotSupportedPipeline, TosaPipelineBI, ) +from executorch.backends.arm.tosa_specification import TosaSpecification from executorch.backends.xnnpack.test.tester import Quantize from torch.ao.quantization.observer import HistogramObserver from torch.ao.quantization.quantizer import QuantizationSpec @@ -37,9 +38,18 @@ def _get_16_bit_quant_config(): return qconfig -def get_16bit_sigmoid_quantizer(tosa_str: str): - tosa_spec = common.TosaSpecification.create_from_string(tosa_str) - quantizer = TOSAQuantizer(tosa_spec) +def get_16bit_sigmoid_quantizer(u55_config=False): + tosa_version = conftest.get_option("tosa_version") + tosa_profiles = { + "0.80": TosaSpecification.create_from_string( + "TOSA-0.80+BI" + ("+u55" if u55_config else "") + ), + "1.0": TosaSpecification.create_from_string( + "TOSA-1.0+INT" + ("+u55" if u55_config else "") + ), + } + + quantizer = TOSAQuantizer(tosa_profiles[tosa_version]) quantizer.set_global(get_symmetric_quantization_config()) quantizer.set_module_type( torch.nn.modules.activation.Sigmoid, _get_16_bit_quant_config() @@ -86,7 
+96,7 @@ def test_sigmoid_tosa_BI(test_data): pipeline = TosaPipelineBI( Sigmoid(), (test_data(),), Sigmoid.aten_op, Sigmoid.exir_op ) - pipeline.change_args("quantize", get_16bit_sigmoid_quantizer("TOSA-0.80+BI")) + pipeline.change_args("quantize", get_16bit_sigmoid_quantizer()) pipeline.run() @@ -96,26 +106,41 @@ def test_sigmoid_tosa_BI(test_data): xfails={ "ramp": "AssertionError: Output 0 does not match reference output. MLETORCH-787" }, + strict=False, ) @pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642 -def test_sigmoid_add_sigmoid_tosa_BI(test_data): +def test_sigmoid_tosa_BI_add_sigmoid(test_data): pipeline = TosaPipelineBI( - SigmoidAddSigmoid(), (test_data(),), Sigmoid.aten_op, Sigmoid.exir_op + SigmoidAddSigmoid(), + (test_data(),), + Sigmoid.aten_op, + Sigmoid.exir_op, ) - pipeline.change_args("quantize", get_16bit_sigmoid_quantizer("TOSA-0.80+BI")) pipeline.run() +xfails = { + "ones": "AssertionError: Output 0 does not match reference output. MLETORCH-787", + "rand": "AssertionError: Output 0 does not match reference output. MLETORCH-787", + "rand_4d": "AssertionError: Output 0 does not match reference output. MLETORCH-787", + "ramp": "AssertionError: Output 0 does not match reference output. MLETORCH-787", +} + + @common.parametrize( "test_data", test_data_suite, ) @pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642 -def test_sigmoid_tosa_u55(test_data): +def test_sigmoid_u55_BI(test_data): pipeline = OpNotSupportedPipeline( - Sigmoid(), (test_data(),), "TOSA-0.80+BI+u55", {Sigmoid.exir_op: 1} + Sigmoid(), + (test_data(),), + {Sigmoid.exir_op: 1}, + quantize=True, + u55_subset=True, ) - pipeline.change_args("quantize", get_16bit_sigmoid_quantizer("TOSA-0.80+BI+u55")) + pipeline.change_args("quantize", get_16bit_sigmoid_quantizer(True)) pipeline.run() @@ -124,26 +149,31 @@ def test_sigmoid_tosa_u55(test_data): test_data_suite, ) @pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642 -def test_sigmoid_add_sigmoid_tosa_u55(test_data): +def test_sigmoid_u55_BI_add_sigmoid(test_data): pipeline = OpNotSupportedPipeline( SigmoidAddSigmoid(), (test_data(),), - "TOSA-0.80+BI+u55", {Sigmoid.exir_op: 3}, n_expected_delegates=1, + quantize=True, + u55_subset=True, ) - pipeline.change_args("quantize", get_16bit_sigmoid_quantizer("TOSA-0.80+BI+u55")) + pipeline.change_args("quantize", get_16bit_sigmoid_quantizer(True)) pipeline.run() @common.parametrize("test_data", test_data_suite) @pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642 @common.XfailIfNoCorstone320 -def test_sigmoid_tosa_u85(test_data): +def test_sigmoid_u85_BI(test_data): pipeline = EthosU85PipelineBI( - Sigmoid(), (test_data(),), Sigmoid.aten_op, Sigmoid.exir_op, run_on_fvp=True + Sigmoid(), + (test_data(),), + Sigmoid.aten_op, + Sigmoid.exir_op, + run_on_fvp=True, ) - pipeline.change_args("quantize", get_16bit_sigmoid_quantizer("TOSA-0.80+BI")) + pipeline.change_args("quantize", get_16bit_sigmoid_quantizer()) pipeline.run() @@ -156,7 +186,7 @@ def test_sigmoid_tosa_u85(test_data): ) @pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642 @common.XfailIfNoCorstone320 -def test_sigmoid_add_sigmoid_tosa_u85(test_data): +def test_sigmoid_u85_BI_add_sigmoid(test_data): pipeline = EthosU85PipelineBI( SigmoidAddSigmoid(), (test_data(),), @@ -164,5 +194,5 @@ def test_sigmoid_add_sigmoid_tosa_u85(test_data): Sigmoid.exir_op, run_on_fvp=True, ) - pipeline.change_args("quantize", get_16bit_sigmoid_quantizer("TOSA-0.80+BI")) + 
pipeline.change_args("quantize", get_16bit_sigmoid_quantizer()) pipeline.run() diff --git a/backends/arm/test/ops/test_sigmoid_32bit.py b/backends/arm/test/ops/test_sigmoid_32bit.py index fbfc263a6d0..a0fe077da5f 100644 --- a/backends/arm/test/ops/test_sigmoid_32bit.py +++ b/backends/arm/test/ops/test_sigmoid_32bit.py @@ -7,12 +7,13 @@ import torch from executorch.backends.arm.quantizer import TOSAQuantizer from executorch.backends.arm.quantizer.quantization_config import QuantizationConfig -from executorch.backends.arm.test import common +from executorch.backends.arm.test import common, conftest from executorch.backends.arm.test.tester.test_pipeline import ( EthosU85PipelineBI, OpNotSupportedPipeline, TosaPipelineBI, ) +from executorch.backends.arm.tosa_specification import TosaSpecification from executorch.backends.xnnpack.test.tester import Quantize from torch.ao.quantization.observer import HistogramObserver from torch.ao.quantization.quantizer import QuantizationSpec @@ -53,9 +54,18 @@ def _get_32_bit_quant_config(): return qconfig -def get_32bit_sigmoid_quantizer(tosa_str: str): - tosa_spec = common.TosaSpecification.create_from_string(tosa_str) - quantizer = TOSAQuantizer(tosa_spec) +def get_32bit_sigmoid_quantizer(u55_config=False): + tosa_version = conftest.get_option("tosa_version") + tosa_profiles = { + "0.80": TosaSpecification.create_from_string( + "TOSA-0.80+BI" + ("+u55" if u55_config else "") + ), + "1.0": TosaSpecification.create_from_string( + "TOSA-1.0+INT" + ("+u55" if u55_config else "") + ), + } + + quantizer = TOSAQuantizer(tosa_profiles[tosa_version]) quantizer.set_global(_get_32_bit_quant_config()) quantizer.set_module_type( torch.nn.modules.activation.Sigmoid, _get_16_bit_quant_config() @@ -105,55 +115,65 @@ def test_sigmoid_tosa_BI(test_data): Sigmoid.aten_op, Sigmoid.exir_op, ) - pipeline.change_args("quantize", get_32bit_sigmoid_quantizer("TOSA-0.80+BI")) + pipeline.change_args("quantize", get_32bit_sigmoid_quantizer()) pipeline.run() @common.parametrize("test_data", test_data_suite) @pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642 -def test_sigmoid_add_sigmoid_tosa_BI(test_data): +def test_sigmoid_tosa_BI_add_sigmoid(test_data): pipeline = TosaPipelineBI( SigmoidAddSigmoid(), (test_data(),), Sigmoid.aten_op, Sigmoid.exir_op, ) - pipeline.change_args("quantize", get_32bit_sigmoid_quantizer("TOSA-0.80+BI")) + pipeline.change_args("quantize", get_32bit_sigmoid_quantizer()) pipeline.run() @common.parametrize("test_data", test_data_suite) @pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642 -def test_sigmoid_tosa_u55(test_data): +def test_sigmoid_u55_BI(test_data): pipeline = OpNotSupportedPipeline( - Sigmoid(), (test_data(),), "TOSA-0.80+BI+u55", {Sigmoid.exir_op: 1} + Sigmoid(), + (test_data(),), + {Sigmoid.exir_op: 1}, + quantize=True, + u55_subset=True, ) - pipeline.change_args("quantize", get_32bit_sigmoid_quantizer("TOSA-0.80+BI+u55")) + pipeline.change_args("quantize", get_32bit_sigmoid_quantizer(True)) pipeline.run() @common.parametrize("test_data", test_data_suite) @pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642 -def test_sigmoid_add_sigmoid_tosa_u55(test_data): +def test_sigmoid_u55_BI_add_sigmoid(test_data): pipeline = OpNotSupportedPipeline( SigmoidAddSigmoid(), (test_data(),), - "TOSA-0.80+BI+u55", {Sigmoid.exir_op: 3}, n_expected_delegates=1, + quantize=True, + u55_subset=True, ) - pipeline.change_args("quantize", get_32bit_sigmoid_quantizer("TOSA-0.80+BI+u55")) + 
pipeline.change_args("quantize", get_32bit_sigmoid_quantizer(True)) pipeline.run() @common.parametrize("test_data", test_data_suite) @pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642 @common.XfailIfNoCorstone320 -def test_sigmoid_tosa_u85(test_data): +@pytest.mark.flaky(reruns=5) +def test_sigmoid_u85_BI(test_data): pipeline = EthosU85PipelineBI( - Sigmoid(), (test_data(),), Sigmoid.aten_op, Sigmoid.exir_op, run_on_fvp=True + Sigmoid(), + (test_data(),), + Sigmoid.aten_op, + Sigmoid.exir_op, + run_on_fvp=True, ) - pipeline.change_args("quantize", get_32bit_sigmoid_quantizer("TOSA-0.80+BI")) + pipeline.change_args("quantize", get_32bit_sigmoid_quantizer()) pipeline.run() @@ -162,11 +182,14 @@ def test_sigmoid_tosa_u85(test_data): test_data_suite, xfails={ "ramp": "AssertionError: Output 0 does not match reference output.", + "rand": "AssertionError: Output 0 does not match reference output.", + "rand_4d": "AssertionError: Output 0 does not match reference output.", }, ) @pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642 @common.XfailIfNoCorstone320 -def test_sigmoid_add_sigmoid_tosa_u85(test_data): +@pytest.mark.flaky(reruns=5) +def test_sigmoid_u85_BI_add_sigmoid(test_data): pipeline = EthosU85PipelineBI( SigmoidAddSigmoid(), (test_data(),), @@ -174,5 +197,5 @@ def test_sigmoid_add_sigmoid_tosa_u85(test_data): Sigmoid.exir_op, run_on_fvp=True, ) - pipeline.change_args("quantize", get_32bit_sigmoid_quantizer("TOSA-0.80+BI")) + pipeline.change_args("quantize", get_32bit_sigmoid_quantizer()) pipeline.run() diff --git a/backends/arm/test/ops/test_silu.py b/backends/arm/test/ops/test_silu.py index 51748b02450..e1736bf10e6 100644 --- a/backends/arm/test/ops/test_silu.py +++ b/backends/arm/test/ops/test_silu.py @@ -30,14 +30,14 @@ def forward( return torch.nn.SiLU(inplace=_inplace)(_input) test_data: list[input_t] = { - "op_silu_rank1_ones": (torch.ones(5),), - "op_silu_rank1_negative_ones": (torch.ones(5) * (-1),), - "op_silu_rank1_rand": (torch.rand(5) * 5,), - "op_silu_rank4_ones": (torch.ones(1, 10, 25, 20),), - "op_silu_rank4_negative_ones": ((-1) * torch.ones(1, 10, 25, 20),), - "op_silu_rank4_large_rand": (200 * torch.rand(1, 10, 25, 20),), - "op_silu_rank4_negative_large_rand": ((-200) * torch.rand(1, 10, 25, 20),), - "op_silu_rank4_large_randn": (200 * torch.randn(1, 10, 25, 20) + 1,), + "op_silu_rank1_ones": lambda: torch.ones(5), + "op_silu_rank1_negative_ones": lambda: torch.ones(5) * (-1), + "op_silu_rank1_rand": lambda: torch.rand(5) * 5, + "op_silu_rank4_ones": lambda: torch.ones(1, 10, 25, 20), + "op_silu_rank4_negative_ones": lambda: (-1) * torch.ones(1, 10, 25, 20), + "op_silu_rank4_large_rand": lambda: 200 * torch.rand(1, 10, 25, 20), + "op_silu_rank4_negative_large_rand": lambda: (-200) * torch.rand(1, 10, 25, 20), + "op_silu_rank4_large_randn": lambda: 200 * torch.randn(1, 10, 25, 20) + 1, } aten_op_MI = "torch.ops.aten.silu.default" @@ -47,28 +47,28 @@ def forward( @common.parametrize("test_data", Silu.test_data) def test_silu_tosa_MI(test_data: input_t): - silu_data = (test_data[0], False) + silu_data = (test_data(), False) pipeline = TosaPipelineMI[input_t](Silu(), silu_data, Silu.aten_op_MI) pipeline.run() @common.parametrize("test_data", Silu.test_data) def test_silu_tosa_MI_inplace(test_data: input_t): - silu_data = (test_data[0], True) + silu_data = (test_data(), True) pipeline = TosaPipelineMI[input_t](Silu(), silu_data, Silu.aten_op_inplace_MI) pipeline.run() @common.parametrize("test_data", Silu.test_data) def 
test_silu_tosa_BI(test_data: input_t): - silu_data = (test_data[0], False) + silu_data = (test_data(), False) pipeline = TosaPipelineBI[input_t](Silu(), silu_data, Silu.aten_op_BI) pipeline.run() @common.parametrize("test_data", Silu.test_data) def test_silu_tosa_BI_inplace(test_data: input_t): - silu_data = (test_data[0], True) + silu_data = (test_data(), True) pipeline = TosaPipelineBI[input_t](Silu(), silu_data, Silu.aten_op_BI) pipeline.run() @@ -76,7 +76,7 @@ def test_silu_tosa_BI_inplace(test_data: input_t): @common.parametrize("test_data", Silu.test_data) @common.XfailIfNoCorstone300 def test_silu_u55_BI(test_data: input_t): - silu_data = (test_data[0], False) + silu_data = (test_data(), False) pipeline = EthosU55PipelineBI[input_t]( Silu(), silu_data, Silu.aten_op_BI, run_on_fvp=True ) @@ -86,7 +86,7 @@ def test_silu_u55_BI(test_data: input_t): @common.parametrize("test_data", Silu.test_data) @common.XfailIfNoCorstone300 def test_silu_u55_BI_inplace(test_data: input_t): - silu_data = (test_data[0], True) + silu_data = (test_data(), True) pipeline = EthosU55PipelineBI[input_t]( Silu(), silu_data, Silu.aten_op_BI, run_on_fvp=True ) @@ -96,7 +96,7 @@ def test_silu_u55_BI_inplace(test_data: input_t): @common.parametrize("test_data", Silu.test_data) @common.XfailIfNoCorstone320 def test_silu_u85_BI(test_data: input_t): - silu_data = (test_data[0], False) + silu_data = (test_data(), False) pipeline = EthosU85PipelineBI[input_t]( Silu(), silu_data, Silu.aten_op_BI, run_on_fvp=True ) @@ -106,7 +106,7 @@ def test_silu_u85_BI(test_data: input_t): @common.parametrize("test_data", Silu.test_data) @common.XfailIfNoCorstone320 def test_silu_u85_BI_inplace(test_data: input_t): - silu_data = (test_data[0], True) + silu_data = (test_data(), True) pipeline = EthosU85PipelineBI[input_t]( Silu(), silu_data, Silu.aten_op_BI, run_on_fvp=True ) diff --git a/backends/arm/test/ops/test_slice.py b/backends/arm/test/ops/test_slice.py index 91ef51cc2a2..6ae12c41657 100644 --- a/backends/arm/test/ops/test_slice.py +++ b/backends/arm/test/ops/test_slice.py @@ -4,135 +4,91 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
-import unittest -from typing import Tuple -import pytest +from typing import Tuple import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - -test_data_suite = [ - (torch.ones(10), [(3, -3)]), - (torch.ones(10), [(-8, 3)]), - (torch.ones(10, 10), [(1, 3), (3, None)]), - (torch.ones(10, 10, 10), [(0, 7), (0, None), (0, 8)]), - (torch.ones((1, 12, 10, 10)), [(None, None), (None, 5), (3, 5), (4, 10)]), -] - - -class TestSimpleSlice(unittest.TestCase): - - class Slice(torch.nn.Module): - def forward(self, x: torch.Tensor, s: list[tuple[int, int]]): - slices = [slice(*i) for i in s] - return x[slices] - - def _test_slice_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: torch.Tensor - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check(["torch.ops.aten.slice.Tensor"]) - .to_edge() - .check(["executorch_exir_dialects_edge__ops_aten_slice_copy"]) - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - - if conftest.is_option_enabled("tosa_ref_model"): - tester.run_method_and_compare_outputs(inputs=test_data) - - def _test_slice_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check(["torch.ops.aten.slice.Tensor"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - - if conftest.is_option_enabled("tosa_ref_model"): - tester.run_method_and_compare_outputs(inputs=test_data, qtol=1) - - def _test_slice_ethos_BI_pipeline( - self, - compile_spec: list[CompileSpec], - module: torch.nn.Module, - test_data: Tuple[torch.Tensor], - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_u55_compile_spec(), - ) - .quantize() - .export() - .check(["torch.ops.aten.slice.Tensor"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - - def _test_slice_u55_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - self._test_slice_ethos_BI_pipeline( - common.get_u55_compile_spec(), module, test_data - ) - - def _test_slice_u85_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - self._test_slice_ethos_BI_pipeline( - common.get_u85_compile_spec(), module, test_data - ) - - @parameterized.expand(test_data_suite) - @pytest.mark.tosa_ref_model - def test_slice_tosa_MI(self, tensor: torch.Tensor, slices: list[tuple[int, int]]): - self._test_slice_tosa_MI_pipeline(self.Slice(), (tensor, slices)) - - @parameterized.expand(test_data_suite) - @pytest.mark.tosa_ref_model - def test_slice_nchw_tosa_BI( - self, tensor: torch.Tensor, slices: list[tuple[int, int]] - ): - self._test_slice_tosa_BI_pipeline(self.Slice(), (tensor, slices)) - - @parameterized.expand(test_data_suite) - @pytest.mark.tosa_ref_model - def test_slice_nhwc_tosa_BI( - self, tensor: torch.Tensor, slices: list[tuple[int, int]] - ): - self._test_slice_tosa_BI_pipeline(self.Slice(), (tensor, slices)) - - @parameterized.expand(test_data_suite) - def test_slice_u55_BI(self, tensor: 
torch.Tensor, slices: list[tuple[int, int]]): - self._test_slice_u55_BI_pipeline(self.Slice(), (tensor, slices)) - - @parameterized.expand(test_data_suite) - def test_slice_u85_BI(self, tensor: torch.Tensor, slices: list[tuple[int, int]]): - self._test_slice_u85_BI_pipeline(self.Slice(), (tensor, slices)) +from executorch.backends.arm.test import common + +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +aten_op = "torch.ops.aten.slice.Tensor" +exir_op = "executorch_exir_dialects_edge__ops_aten_slice_copy" + +input_t1 = Tuple[torch.Tensor] # Input x + +test_data_suite = { + "ones_neg_3": lambda: (torch.ones(10), [(3, -3)]), + "ones_neg_8": lambda: (torch.ones(10), [(-8, 3)]), + "ones_slice_2": lambda: (torch.ones(10, 10), [(1, 3), (3, None)]), + "ones_slice_3": lambda: (torch.ones(10, 10, 10), [(0, 7), (0, None), (0, 8)]), + "ones_slice_4": lambda: ( + torch.ones((1, 12, 10, 10)), + [(None, None), (None, 5), (3, 5), (4, 10)], + ), +} + + +class Slice(torch.nn.Module): + + def forward(self, x: torch.Tensor, s: list[tuple[int, int]]): + slices = [slice(*i) for i in s] + return x[slices] + + +@common.parametrize("test_data", test_data_suite) +def test_slice_tensor_tosa_MI(test_data: torch.Tensor): + pipeline = TosaPipelineMI[input_t1](Slice(), test_data(), aten_op, exir_op) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_slice_tensor_tosa_BI_nchw(test_data: torch.Tensor): + pipeline = TosaPipelineBI[input_t1]( + Slice(), + test_data(), + aten_op, + exir_op, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_slice_tensor_tosa_BI_nhwc(test_data: torch.Tensor): + pipeline = TosaPipelineBI[input_t1]( + Slice(), + test_data(), + aten_op, + exir_op, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_slice_tensor_u55_BI(test_data: torch.Tensor): + pipeline = EthosU55PipelineBI[input_t1]( + Slice(), + test_data(), + aten_ops=[], + exir_ops=[], + run_on_fvp=False, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_slice_tensor_u85_BI(test_data: torch.Tensor): + pipeline = EthosU85PipelineBI[input_t1]( + Slice(), + test_data(), + aten_ops=[], + exir_ops=[], + run_on_fvp=False, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_softmax.py b/backends/arm/test/ops/test_softmax.py index dcee5d038f2..5ab616c0eea 100644 --- a/backends/arm/test/ops/test_softmax.py +++ b/backends/arm/test/ops/test_softmax.py @@ -18,7 +18,6 @@ aten_op = "torch.ops.aten.softmax.default" # Used for checking that we do not have softmax in the graph after decompose exir_op = "executorch_exir_dialects_edge__ops_aten__softmax_tensor" - input_t1 = Tuple[torch.Tensor] # Input x @@ -31,20 +30,20 @@ def forward(self, x): return self.softmax(x) test_data = { - "ones": ((torch.ones(10, 10),), 1), - "ones_neg_dim": ((torch.ones(1, 3, 4),), -1), - "randn_neg_dim": ((torch.randn(1, 5, 8, 7),), -3), - "zeros": ((torch.zeros(1, 8, 5, 2),), 0), - "zeros_neg_dim": ((torch.zeros(1, 7, 8, 9),), -4), - "rand": ((torch.rand(1, 2, 5, 8),), 2), - "rand_neg_dim": ((torch.rand(1, 10, 8, 10),), -2), - "randn_mult_batches": ((torch.randn(2, 10, 10, 10),), 3), + "ones": lambda: ((torch.ones(10, 10),), 1), + "ones_neg_dim": lambda: ((torch.ones(1, 3, 4),), -1), + "randn_neg_dim": lambda: ((torch.randn(1, 5, 8, 7),), -3), + "zeros": lambda: ((torch.zeros(1, 8, 5, 2),), 0), + "zeros_neg_dim": lambda: 
((torch.zeros(1, 7, 8, 9),), -4), + "rand": lambda: ((torch.rand(1, 2, 5, 8),), 2), + "rand_neg_dim": lambda: ((torch.rand(1, 10, 8, 10),), -2), + "randn_mult_batches": lambda: ((torch.randn(2, 10, 10, 10),), 3), } @common.parametrize("test_data", Softmax.test_data) def test_softmax_tosa_MI(test_data): - data, dim = test_data + data, dim = test_data() pipeline = TosaPipelineMI[input_t1](Softmax(dim), data, []) pipeline.add_stage_after( "to_edge_transform_and_lower", pipeline.tester.check_not, [exir_op] @@ -54,7 +53,7 @@ def test_softmax_tosa_MI(test_data): @common.parametrize("test_data", Softmax.test_data) def test_softmax_tosa_BI(test_data): - data, dim = test_data + data, dim = test_data() pipeline = TosaPipelineBI[input_t1](Softmax(dim), data, []) pipeline.add_stage_after("quantize", pipeline.tester.check_not, [aten_op]) pipeline.change_args("run_method_and_compare_outputs", qtol=1) @@ -70,7 +69,7 @@ def test_softmax_tosa_BI(test_data): ) @common.XfailIfNoCorstone300 def test_softmax_u55_BI(test_data): - data, dim = test_data + data, dim = test_data() pipeline = EthosU55PipelineBI[input_t1](Softmax(dim), data, [], run_on_fvp=True) pipeline.add_stage_after("quantize", pipeline.tester.check_not, [aten_op]) pipeline.change_args("run_method_and_compare_outputs", qtol=1) @@ -86,7 +85,7 @@ def test_softmax_u55_BI(test_data): ) @common.XfailIfNoCorstone320 def test_softmax_u85_BI(test_data): - data, dim = test_data + data, dim = test_data() pipeline = EthosU85PipelineBI[input_t1](Softmax(dim), data, [], run_on_fvp=True) pipeline.add_stage_after("quantize", pipeline.tester.check_not, [aten_op]) pipeline.change_args("run_method_and_compare_outputs", qtol=1) diff --git a/backends/arm/test/ops/test_split.py b/backends/arm/test/ops/test_split.py index b86e27f1a4c..90458584995 100644 --- a/backends/arm/test/ops/test_split.py +++ b/backends/arm/test/ops/test_split.py @@ -1,141 +1,147 @@ # Copyright 2024-2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
-import unittest + +from typing import Tuple import torch from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - -test_data_t = tuple[torch.Tensor, int | list[int], int] - - -class TestSimpleSplit(unittest.TestCase): - class Split(torch.nn.Module): - - test_data: list[tuple[test_data_t]] = [ - ((torch.rand(10), 2, 0),), - ((torch.rand(10, 10), 3, 1),), - ((torch.rand(10, 10), 4, -1),), - ((torch.rand(10, 15, 10), [2, 2, 11], 1),), - ((torch.rand(4, 4, 4, 4), 2, 0),), - ((torch.rand(4, 4, 4, 4), [1, 1, 1, 1], -2),), - ] - - def forward( - self, x: torch.Tensor, split_size_or_sections: int | list[int], dim: int - ): - return x.split(split_size=split_size_or_sections, dim=dim) - - class SplitWithSizes(torch.nn.Module): - def forward(self, x: torch.Tensor, split_sizes: list[int], dim: int): - return x.split_with_sizes(split_sizes=split_sizes, dim=dim) - - class SplitSingleOut(torch.nn.Module): - def forward( - self, x: torch.Tensor, split_size_or_sections: int | list[int], dim: int - ): - return x.split(split_size=split_size_or_sections, dim=dim)[1] - - class SplitTwoOut(torch.nn.Module): - def forward( - self, x: torch.Tensor, split_size_or_sections: int | list[int], dim: int - ): - return x.split(split_size=split_size_or_sections, dim=dim)[1:3] - - def _test_split_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: test_data_t +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +exir_op = "executorch_exir_dialects_edge__ops_aten_split_with_sizes_copy_default" +input_t1 = Tuple[torch.Tensor] # Input x + + +class Split(torch.nn.Module): + + test_data = { + "split_1d_2_size_0_dim": lambda: (torch.rand(10), 2, 0), + "split_2d_3_size_1_dim": lambda: (torch.rand(10, 10), 3, 1), + "split_2d_2_size_4_dim": lambda: (torch.rand(10, 10), 4, -1), + "split_4d_2_size_2_dim": lambda: (torch.rand(4, 4, 4, 4), 2, 0), + } + + test_data_list = { + "split_3d_2_sizes_dim": lambda: (torch.rand(10, 15, 10), [2, 2, 11], 1), + "split_4d_2_sizes_dim_neg": lambda: (torch.rand(4, 4, 4, 4), [1, 1, 1, 1], -2), + } + + def forward( + self, x: torch.Tensor, split_size_or_sections: int | list[int], dim: int ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .to_edge() - .check( - [ - "executorch_exir_dialects_edge__ops_aten_split_with_sizes_copy_default" - ] - ) - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_split_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: test_data_t + return x.split(split_size=split_size_or_sections, dim=dim) + + +class SplitWithSizes(torch.nn.Module): + def forward(self, x: torch.Tensor, split_sizes: list[int], dim: int): + return x.split_with_sizes(split_sizes=split_sizes, dim=dim) + + +class SplitSingleOut(torch.nn.Module): + def forward( + self, x: torch.Tensor, split_size_or_sections: int | list[int], dim: int ): + return x.split(split_size=split_size_or_sections, dim=dim)[1] + - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .to_edge() - .partition() - 
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, qtol=1) - ) - - def _test_split_ethosu_BI_pipeline( - self, compile_spec: CompileSpec, module: torch.nn.Module, test_data: test_data_t +class SplitTwoOut(torch.nn.Module): + def forward( + self, x: torch.Tensor, split_size_or_sections: int | list[int], dim: int ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - - @parameterized.expand(Split.test_data) - def test_split_tosa_MI(self, test_data: test_data_t): - self._test_split_tosa_MI_pipeline(self.Split(), test_data) - - @parameterized.expand([Split.test_data[3], Split.test_data[5]]) - def test_split_with_sizes_tosa_MI(self, test_data: test_data_t): - assert isinstance(test_data[1], list) - self._test_split_tosa_MI_pipeline(self.SplitWithSizes(), test_data) - - @parameterized.expand(Split.test_data) - def test_split_one_out_tosa_MI(self, test_data: test_data_t): - self._test_split_tosa_MI_pipeline(self.SplitSingleOut(), test_data) - - @parameterized.expand(Split.test_data) - def test_split_two_out_tosa_MI(self, test_data: test_data_t): - self._test_split_tosa_MI_pipeline(self.SplitTwoOut(), test_data) - - @parameterized.expand(Split.test_data) - def test_split_tosa_BI(self, test_data: test_data_t): - self._test_split_tosa_BI_pipeline(self.Split(), test_data) - - @parameterized.expand(Split.test_data) - def test_split_u55_BI(self, test_data: test_data_t): - self._test_split_ethosu_BI_pipeline( - common.get_u55_compile_spec(), self.Split(), test_data - ) - - @parameterized.expand(Split.test_data) - def test_split_u85_BI(self, test_data: test_data_t): - self._test_split_ethosu_BI_pipeline( - common.get_u85_compile_spec(), self.Split(), test_data - ) + return x.split(split_size=split_size_or_sections, dim=dim)[1:3] + + +@common.parametrize( + "test_data", + (Split.test_data | Split.test_data_list), +) +def test_split_with_sizes_tosa_MI(test_data: input_t1): + + pipeline = TosaPipelineMI[input_t1]( + Split(), + test_data(), + aten_op=[], + exir_op=exir_op, + ) + pipeline.run() + + +@common.parametrize("test_data", Split.test_data_list) +def test_split_with_sizes_tosa_MI_2(test_data: input_t1): + + pipeline = TosaPipelineMI[input_t1]( + SplitWithSizes(), + test_data(), + aten_op=[], + exir_op=exir_op, + ) + pipeline.run() + + +@common.parametrize( + "test_data", + (Split.test_data | Split.test_data_list), +) +def test_split_with_sizes_tosa_MI_one_out(test_data: input_t1): + + pipeline = TosaPipelineMI[input_t1]( + SplitSingleOut(), + test_data(), + aten_op=[], + exir_op=exir_op, + ) + pipeline.run() + + +@common.parametrize( + "test_data", + (Split.test_data | Split.test_data_list), +) +def test_split_with_sizes_tosa_BI(test_data: input_t1): + + pipeline = TosaPipelineBI[input_t1]( + Split(), + test_data(), + aten_op=[], + exir_op=exir_op, + ) + pipeline.run() + + +@common.parametrize( + "test_data", + (Split.test_data | Split.test_data_list), +) +def test_split_with_sizes_u55_BI(test_data: input_t1): + pipeline = EthosU55PipelineBI[input_t1]( + Split(), + test_data(), + aten_ops=[], + exir_ops=exir_op, + run_on_fvp=False, + ) + pipeline.run() + + +@common.parametrize( + "test_data", + (Split.test_data | Split.test_data_list), +) +def test_split_with_sizes_u85_BI(test_data: input_t1): + + pipeline = 
EthosU85PipelineBI[input_t1]( + Split(), + test_data(), + aten_ops=[], + exir_ops=exir_op, + run_on_fvp=False, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_sqrt.py b/backends/arm/test/ops/test_sqrt.py index 53a1e79c0a8..0c79f534656 100644 --- a/backends/arm/test/ops/test_sqrt.py +++ b/backends/arm/test/ops/test_sqrt.py @@ -31,11 +31,11 @@ def forward(self, x): return torch.sqrt(x) test_data: Dict[str, input_t] = { - "sqrt_tensor_rank1_ones": (torch.ones(10),), - "sqrt_tensor_rank2_random": (torch.rand(5, 10),), - "sqrt_tensor_rank3_ones": (torch.ones(2, 3, 4),), - "sqrt_tensor_rank4_random": (torch.rand(1, 3, 8, 8),), - "sqrt_tensor_rank4_multibatch": (torch.rand(2, 3, 4, 4),), + "sqrt_tensor_rank1_ones": lambda: (torch.ones(10),), + "sqrt_tensor_rank2_random": lambda: (torch.rand(5, 10),), + "sqrt_tensor_rank3_ones": lambda: (torch.ones(2, 3, 4),), + "sqrt_tensor_rank4_random": lambda: (torch.rand(1, 3, 8, 8),), + "sqrt_tensor_rank4_multibatch": lambda: (torch.rand(2, 3, 4, 4),), } @@ -47,7 +47,10 @@ def forward(self, x): @common.parametrize("test_data", Sqrt.test_data) def test_sqrt_tosa_MI(test_data: Sqrt.input_t): pipeline = TosaPipelineMI[Sqrt.input_t]( - Sqrt(), test_data, Sqrt.aten_op_MI, Sqrt.exir_op_MI + Sqrt(), + test_data(), + Sqrt.aten_op_MI, + Sqrt.exir_op_MI, ) pipeline.run() @@ -55,7 +58,10 @@ def test_sqrt_tosa_MI(test_data: Sqrt.input_t): @common.parametrize("test_data", Sqrt.test_data) def test_sqrt_tosa_BI(test_data: Sqrt.input_t): pipeline = TosaPipelineBI[Sqrt.input_t]( - Sqrt(), test_data, Sqrt.aten_op_BI, Sqrt.exir_op_BI + Sqrt(), + test_data(), + Sqrt.aten_op_BI, + Sqrt.exir_op_BI, ) pipeline.run() @@ -64,7 +70,11 @@ def test_sqrt_tosa_BI(test_data: Sqrt.input_t): @common.XfailIfNoCorstone300 def test_sqrt_u55_BI(test_data: Sqrt.input_t): pipeline = EthosU55PipelineBI[Sqrt.input_t]( - Sqrt(), test_data, Sqrt.aten_op_BI, Sqrt.exir_op_BI, run_on_fvp=True + Sqrt(), + test_data(), + Sqrt.aten_op_BI, + Sqrt.exir_op_BI, + run_on_fvp=True, ) pipeline.run() @@ -73,6 +83,10 @@ def test_sqrt_u55_BI(test_data: Sqrt.input_t): @common.XfailIfNoCorstone320 def test_sqrt_u85_BI(test_data: Sqrt.input_t): pipeline = EthosU85PipelineBI[Sqrt.input_t]( - Sqrt(), test_data, Sqrt.aten_op_BI, Sqrt.exir_op_BI, run_on_fvp=True + Sqrt(), + test_data(), + Sqrt.aten_op_BI, + Sqrt.exir_op_BI, + run_on_fvp=True, ) pipeline.run() diff --git a/backends/arm/test/ops/test_squeeze.py b/backends/arm/test/ops/test_squeeze.py index 9f02392e1e2..e5f606c887e 100644 --- a/backends/arm/test/ops/test_squeeze.py +++ b/backends/arm/test/ops/test_squeeze.py @@ -1,5 +1,4 @@ # Copyright 2024-2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. @@ -8,208 +7,194 @@ # Tests the squeeze op which squeezes a given dimension with size 1 into a lower ranked tensor. 
# -import unittest -from typing import Optional, Tuple + +from typing import Tuple import torch from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester - -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - - -class TestSqueeze(unittest.TestCase): - class SqueezeDim(torch.nn.Module): - test_parameters: list[tuple[torch.Tensor, int]] = [ - (torch.randn(1, 1, 5), -2), - (torch.randn(1, 2, 3, 1), 3), - (torch.randn(1, 5, 1, 5), -2), - ] - - def forward(self, x: torch.Tensor, dim: int): - return x.squeeze(dim) - - class SqueezeDims(torch.nn.Module): - test_parameters: list[tuple[torch.Tensor, tuple[int]]] = [ - (torch.randn(1, 1, 5), (0, 1)), - (torch.randn(1, 5, 5, 1), (0, -1)), - (torch.randn(1, 5, 1, 5), (0, -2)), - ] - - def forward(self, x: torch.Tensor, dims: tuple[int]): - return x.squeeze(dims) - - class Squeeze(torch.nn.Module): - test_parameters: list[tuple[torch.Tensor]] = [ - (torch.randn(1, 1, 5),), - (torch.randn(1, 5, 5, 1),), - (torch.randn(1, 5, 1, 5),), - ] - - def forward(self, x: torch.Tensor): - return x.squeeze() - - def _test_squeeze_tosa_MI_pipeline( - self, - module: torch.nn.Module, - test_data: Tuple[torch.Tensor, Optional[tuple[int]]], - export_target: str, - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({export_target: 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_squeeze_tosa_BI_pipeline( - self, - module: torch.nn.Module, - test_data: Tuple[torch.Tensor, Optional[tuple[int]]], - export_target: str, - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check_count({export_target: 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, qtol=1) - ) - - def _test_squeeze_ethosu_BI_pipeline( - self, - compile_spec: CompileSpec, - module: torch.nn.Module, - test_data: Tuple[torch.Tensor, Optional[tuple[int]]], - export_target: str, - ): - ( - ArmTester(module, example_inputs=test_data, compile_spec=compile_spec) - .quantize() - .export() - .check_count({export_target: 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - - @parameterized.expand(Squeeze.test_parameters) - def test_squeeze_tosa_MI( - self, - test_tensor: torch.Tensor, - ): - self._test_squeeze_tosa_MI_pipeline( - self.Squeeze(), (test_tensor,), "torch.ops.aten.squeeze.default" - ) - - @parameterized.expand(Squeeze.test_parameters) - def test_squeeze_tosa_BI( - self, - test_tensor: torch.Tensor, - ): - self._test_squeeze_tosa_BI_pipeline( - self.Squeeze(), (test_tensor,), "torch.ops.aten.squeeze.default" - ) - - @parameterized.expand(Squeeze.test_parameters) - def test_squeeze_u55_BI( - self, - test_tensor: torch.Tensor, - ): - self._test_squeeze_ethosu_BI_pipeline( - common.get_u55_compile_spec(), - self.Squeeze(), - (test_tensor,), - "torch.ops.aten.squeeze.default", - ) - - @parameterized.expand(Squeeze.test_parameters) - def test_squeeze_u85_BI( - self, - test_tensor: torch.Tensor, - ): - self._test_squeeze_ethosu_BI_pipeline( - 
common.get_u85_compile_spec(), - self.Squeeze(), - (test_tensor,), - "torch.ops.aten.squeeze.default", - ) - - @parameterized.expand(SqueezeDim.test_parameters) - def test_squeeze_dim_tosa_MI(self, test_tensor: torch.Tensor, dim: int): - self._test_squeeze_tosa_MI_pipeline( - self.SqueezeDim(), (test_tensor, dim), "torch.ops.aten.squeeze.dim" - ) - - @parameterized.expand(SqueezeDim.test_parameters) - def test_squeeze_dim_tosa_BI(self, test_tensor: torch.Tensor, dim: int): - self._test_squeeze_tosa_BI_pipeline( - self.SqueezeDim(), (test_tensor, dim), "torch.ops.aten.squeeze.dim" - ) - - @parameterized.expand(SqueezeDim.test_parameters) - def test_squeeze_dim_u55_BI(self, test_tensor: torch.Tensor, dim: int): - self._test_squeeze_ethosu_BI_pipeline( - common.get_u55_compile_spec(), - self.SqueezeDim(), - (test_tensor, dim), - "torch.ops.aten.squeeze.dim", - ) - - @parameterized.expand(SqueezeDim.test_parameters) - def test_squeeze_dim_u85_BI(self, test_tensor: torch.Tensor, dim: int): - self._test_squeeze_ethosu_BI_pipeline( - common.get_u85_compile_spec(), - self.SqueezeDim(), - (test_tensor, dim), - "torch.ops.aten.squeeze.dim", - ) - - @parameterized.expand(SqueezeDims.test_parameters) - def test_squeeze_dims_tosa_MI(self, test_tensor: torch.Tensor, dims: tuple[int]): - self._test_squeeze_tosa_MI_pipeline( - self.SqueezeDims(), (test_tensor, dims), "torch.ops.aten.squeeze.dims" - ) - - @parameterized.expand(SqueezeDims.test_parameters) - def test_squeeze_dims_tosa_BI(self, test_tensor: torch.Tensor, dims: tuple[int]): - self._test_squeeze_tosa_BI_pipeline( - self.SqueezeDims(), (test_tensor, dims), "torch.ops.aten.squeeze.dims" - ) - - @parameterized.expand(SqueezeDims.test_parameters) - def test_squeeze_dims_u55_BI(self, test_tensor: torch.Tensor, dims: tuple[int]): - self._test_squeeze_ethosu_BI_pipeline( - common.get_u55_compile_spec(), - self.SqueezeDims(), - (test_tensor, dims), - "torch.ops.aten.squeeze.dims", - ) - - @parameterized.expand(SqueezeDims.test_parameters) - def test_squeeze_dims_u85_BI(self, test_tensor: torch.Tensor, dims: tuple[int]): - self._test_squeeze_ethosu_BI_pipeline( - common.get_u85_compile_spec(), - self.SqueezeDims(), - (test_tensor, dims), - "torch.ops.aten.squeeze.dims", - ) +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +input_t1 = Tuple[torch.Tensor] # Input x + + +class SqueezeDim(torch.nn.Module): + test_parameters = { + "squeeze3d_dim_neg_2": lambda: (torch.randn(1, 1, 5), -2), + "squeeze4d_dim_pos_3": lambda: (torch.randn(1, 2, 3, 1), 3), + "squeeze4d_dim_neg_2": lambda: (torch.randn(1, 5, 1, 5), -2), + } + + def forward(self, x: torch.Tensor, dim: int): + return x.squeeze(dim) + + +class SqueezeDims(torch.nn.Module): + test_parameters = { + "squeeze3d_dims_0_1": lambda: (torch.randn(1, 1, 5), (0, 1)), + "squeeze4d_dims_0_neg_1": lambda: (torch.randn(1, 5, 5, 1), (0, -1)), + "squeeze4d_dims_0_neg_2": lambda: (torch.randn(1, 5, 1, 5), (0, -2)), + } + + def forward(self, x: torch.Tensor, dims: tuple[int]): + return x.squeeze(dims) + + +class Squeeze(torch.nn.Module): + test_parameters = { + "squeeze3d": lambda: (torch.randn(1, 1, 5),), + "squeeze4d_dims": lambda: (torch.randn(1, 5, 5, 1),), + "squeeze3d_dims_mix": lambda: (torch.randn(1, 5, 1, 5),), + } + + def forward(self, x: torch.Tensor): + return x.squeeze() + + +@common.parametrize("test_data", Squeeze.test_parameters) +def test_squeeze_dim_tosa_MI(test_data: Tuple): + pipeline = 
TosaPipelineMI[input_t1]( + Squeeze(), + test_data(), + aten_op="torch.ops.aten.squeeze.default", + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", Squeeze.test_parameters) +def test_squeeze_dim_tosa_BI(test_data: Tuple): + pipeline = TosaPipelineBI[input_t1]( + Squeeze(), + test_data(), + aten_op="torch.ops.aten.squeeze.default", + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", Squeeze.test_parameters) +@common.XfailIfNoCorstone300 +def test_squeeze_dim_u55_BI(test_data: Tuple): + pipeline = EthosU55PipelineBI[input_t1]( + Squeeze(), + test_data(), + aten_ops="torch.ops.aten.squeeze.default", + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", Squeeze.test_parameters) +@common.XfailIfNoCorstone320 +def test_squeeze_dim_u85_BI(test_data: Tuple): + pipeline = EthosU85PipelineBI[input_t1]( + Squeeze(), + test_data(), + aten_ops="torch.ops.aten.squeeze.default", + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", SqueezeDim.test_parameters) +def test_squeeze_dim_tosa_MI_2(test_data: Tuple): + pipeline = TosaPipelineMI[input_t1]( + SqueezeDim(), + test_data(), + aten_op="torch.ops.aten.squeeze.dim", + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", SqueezeDim.test_parameters) +def test_squeeze_dim_tosa_BI_2(test_data: Tuple): + pipeline = TosaPipelineBI[input_t1]( + SqueezeDim(), + test_data(), + aten_op="torch.ops.aten.squeeze.dim", + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", SqueezeDim.test_parameters) +@common.XfailIfNoCorstone300 +def test_squeeze_dim_u55_BI_2(test_data: Tuple): + pipeline = EthosU55PipelineBI[input_t1]( + SqueezeDim(), + test_data(), + aten_ops="torch.ops.aten.squeeze.dim", + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", SqueezeDim.test_parameters) +@common.XfailIfNoCorstone320 +def test_squeeze_dim_u85_BI_2(test_data: Tuple): + pipeline = EthosU85PipelineBI[input_t1]( + SqueezeDim(), + test_data(), + aten_ops="torch.ops.aten.squeeze.dim", + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", SqueezeDims.test_parameters) +def test_squeeze_dims_tosa_MI(test_data: Tuple): + pipeline = TosaPipelineMI[input_t1]( + SqueezeDims(), + test_data(), + aten_op="torch.ops.aten.squeeze.dims", + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", SqueezeDims.test_parameters) +def test_squeeze_dims_tosa_BI(test_data: Tuple): + pipeline = TosaPipelineBI[input_t1]( + SqueezeDims(), + test_data(), + aten_op="torch.ops.aten.squeeze.dims", + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", SqueezeDims.test_parameters) +@common.XfailIfNoCorstone300 +def test_squeeze_dims_u55_BI(test_data: Tuple): + pipeline = EthosU55PipelineBI[input_t1]( + SqueezeDims(), + test_data(), + aten_ops="torch.ops.aten.squeeze.dims", + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", SqueezeDims.test_parameters) +@common.XfailIfNoCorstone320 +def test_squeeze_dims_u85_BI(test_data: Tuple): + pipeline = EthosU85PipelineBI[input_t1]( + SqueezeDims(), + test_data(), + aten_ops="torch.ops.aten.squeeze.dims", + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_sub.py b/backends/arm/test/ops/test_sub.py index d1849e830c9..f61f3b0583d 100644 --- a/backends/arm/test/ops/test_sub.py +++ b/backends/arm/test/ops/test_sub.py @@ -5,7 +5,6 @@ # 
This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. - from typing import Tuple import torch @@ -22,23 +21,23 @@ # Single-input subtraction (x - x) sub_test_data = { - "ones_1D_5": (torch.ones(5),), - "ones_1D_50": (torch.ones(50),), - "rand_1D_10": (torch.rand(10),), - "rand_2D_5x5": (torch.rand(5, 5),), - "rand_3D_5x5x5": (torch.rand(5, 5, 5),), - "rand_4D_2x3x4x5": (torch.rand(2, 3, 4, 5),), - "zeros": (torch.zeros(10),), + "ones_1D_5": lambda: (torch.ones(5),), + "ones_1D_50": lambda: (torch.ones(50),), + "rand_1D_10": lambda: (torch.rand(10),), + "rand_2D_5x5": lambda: (torch.rand(5, 5),), + "rand_3D_5x5x5": lambda: (torch.rand(5, 5, 5),), + "rand_4D_2x3x4x5": lambda: (torch.rand(2, 3, 4, 5),), + "zeros": lambda: (torch.zeros(10),), } fvp_sub_xfails = {"rand_4D_2x3x4x5": "MLETORCH-517 : Multiple batches not supported"} # Two-input subtraction (x - y) sub2_test_data = { - "rand_2D_4x4": (torch.rand(4, 4), torch.rand(4, 4)), - "rand_3D_4x4x4": (torch.rand(4, 2, 2), torch.rand(4, 2, 2)), - "rand_4D_2x2x4x4": (torch.rand(2, 2, 4, 4), torch.rand(2, 2, 4, 4)), - "zeros": (torch.rand(4, 4), torch.zeros(4, 4)), + "rand_2D_4x4": lambda: (torch.rand(4, 4), torch.rand(4, 4)), + "rand_3D_4x4x4": lambda: (torch.rand(4, 2, 2), torch.rand(4, 2, 2)), + "rand_4D_2x2x4x4": lambda: (torch.rand(2, 2, 4, 4), torch.rand(2, 2, 4, 4)), + "zeros": lambda: (torch.rand(4, 4), torch.zeros(4, 4)), } fvp_sub2_xfails = {"rand_4D_2x2x4x4": "MLETORCH-517 : Multiple batches not supported"} @@ -58,11 +57,11 @@ def forward(self, x: torch.Tensor, y: torch.Tensor): @common.parametrize("test_data", sub_test_data) -def test_sub_tosa_MI(test_data): +def test_sub_tensor_tosa_MI(test_data): """Test Subtraction (TOSA MI)""" pipeline = TosaPipelineMI[input_t1]( Sub(), - test_data, + test_data(), aten_op, exir_op, ) @@ -70,11 +69,11 @@ def test_sub_tosa_MI(test_data): @common.parametrize("test_data", sub2_test_data) -def test_sub_2_tosa_MI(test_data: Tuple[torch.Tensor, torch.Tensor]): +def test_sub_tensor_tosa_MI_2(test_data: Tuple[torch.Tensor, torch.Tensor]): """Test Two-Operand Subtraction (TOSA MI)""" pipeline = TosaPipelineMI[input_t2]( Sub2(), - test_data, + test_data(), aten_op, exir_op, ) @@ -82,11 +81,11 @@ def test_sub_2_tosa_MI(test_data: Tuple[torch.Tensor, torch.Tensor]): @common.parametrize("test_data", sub_test_data) -def test_sub_tosa_BI(test_data): +def test_sub_tensor_tosa_BI(test_data): """Test Subtraction (TOSA BI)""" pipeline = TosaPipelineBI[input_t1]( Sub(), - test_data, + test_data(), aten_op, exir_op, ) @@ -95,11 +94,11 @@ def test_sub_tosa_BI(test_data): @common.parametrize("test_data", sub2_test_data) -def test_sub_2_tosa_BI(test_data: Tuple[torch.Tensor, torch.Tensor]): +def test_sub_tensor_tosa_BI_2(test_data: Tuple[torch.Tensor, torch.Tensor]): """Test Two-Operand Subtraction (TOSA BI)""" pipeline = TosaPipelineBI[input_t2]( Sub2(), - test_data, + test_data(), aten_op, exir_op, ) @@ -107,65 +106,13 @@ def test_sub_2_tosa_BI(test_data: Tuple[torch.Tensor, torch.Tensor]): pipeline.run() -@common.parametrize("test_data", sub_test_data) -def test_sub_u55_BI(test_data): - """Test Subtraction on Ethos-U55""" - pipeline = EthosU55PipelineBI[input_t1]( - Sub(), - test_data, - aten_op, - exir_op, - run_on_fvp=False, - ) - pipeline.run() - - -@common.parametrize("test_data", sub2_test_data) -def test_sub_2_u55_BI(test_data: Tuple[torch.Tensor, torch.Tensor]): - """Test Two-Operand Subtraction on Ethos-U55""" - pipeline = 
EthosU55PipelineBI[input_t2]( - Sub2(), - test_data, - aten_op, - exir_op, - run_on_fvp=False, - ) - pipeline.run() - - -@common.parametrize("test_data", sub_test_data) -def test_sub_u85_BI(test_data): - """Test Subtraction on Ethos-U85 (Quantized Mode)""" - pipeline = EthosU85PipelineBI[input_t1]( - Sub(), - test_data, - aten_op, - exir_op, - run_on_fvp=False, - ) - pipeline.run() - - -@common.parametrize("test_data", sub2_test_data) -def test_sub_2_u85_BI(test_data: Tuple[torch.Tensor, torch.Tensor]): - """Test Two-Operand Subtraction on Ethos-U85""" - pipeline = EthosU85PipelineBI[input_t2]( - Sub2(), - test_data, - aten_op, - exir_op, - run_on_fvp=False, - ) - pipeline.run() - - @common.parametrize("test_data", sub_test_data, fvp_sub_xfails) -@common.SkipIfNoCorstone300 -def test_sub_u55_BI_on_fvp(test_data): +@common.XfailIfNoCorstone300 +def test_sub_tensor_u55_BI(test_data): """Test Subtraction on Ethos-U55 (FVP Mode)""" pipeline = EthosU55PipelineBI[input_t1]( Sub(), - test_data, + test_data(), aten_op, exir_op, run_on_fvp=True, @@ -175,12 +122,12 @@ def test_sub_u55_BI_on_fvp(test_data): @common.parametrize("test_data", sub2_test_data, fvp_sub2_xfails) -@common.SkipIfNoCorstone300 -def test_sub_2_u55_BI_on_fvp(test_data: Tuple[torch.Tensor, torch.Tensor]): +@common.XfailIfNoCorstone300 +def test_sub_tensor_u55_BI_2(test_data: Tuple[torch.Tensor, torch.Tensor]): """Test Two-Operand Subtraction on Ethos-U55 (FVP Mode)""" pipeline = EthosU55PipelineBI[input_t2]( Sub2(), - test_data, + test_data(), aten_op, exir_op, run_on_fvp=True, @@ -190,12 +137,12 @@ def test_sub_2_u55_BI_on_fvp(test_data: Tuple[torch.Tensor, torch.Tensor]): @common.parametrize("test_data", sub_test_data, fvp_sub_xfails) -@common.SkipIfNoCorstone320 -def test_sub_u85_BI_on_fvp(test_data): +@common.XfailIfNoCorstone320 +def test_sub_tensor_u85_BI_2(test_data): """Test Subtraction on Ethos-U85 (FVP Mode)""" pipeline = EthosU85PipelineBI[input_t1]( Sub(), - test_data, + test_data(), aten_op, exir_op, run_on_fvp=True, @@ -205,12 +152,12 @@ def test_sub_u85_BI_on_fvp(test_data): @common.parametrize("test_data", sub2_test_data, fvp_sub2_xfails) -@common.SkipIfNoCorstone320 -def test_sub_2_u85_BI_on_fvp(test_data: Tuple[torch.Tensor, torch.Tensor]): +@common.XfailIfNoCorstone320 +def test_sub_tensor_u85_BI(test_data: Tuple[torch.Tensor, torch.Tensor]): """Test Two-Operand Subtraction on Ethos-U85 (FVP Mode)""" pipeline = EthosU85PipelineBI[input_t2]( Sub2(), - test_data, + test_data(), aten_op, exir_op, run_on_fvp=True, diff --git a/backends/arm/test/ops/test_sum.py b/backends/arm/test/ops/test_sum.py index bc0c50b8ee0..8837f1b292d 100644 --- a/backends/arm/test/ops/test_sum.py +++ b/backends/arm/test/ops/test_sum.py @@ -4,155 +4,104 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest - from typing import Tuple import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - -exampledata_t = Tuple[torch.Tensor, int | list[int], bool] -"""(data, dim(s), keepdim)""" - - -class TestSum(unittest.TestCase): - """Tests sum which sums all elements along some specified dimensions. - keepdim specifies whether the dimension that is summed should - be squeezed or not. 
- """ - - class Sum(torch.nn.Module): - test_parameters: list[Tuple[exampledata_t]] = [ - ((torch.rand(10), 0, True),), - ((torch.rand(10, 10), 1, False),), - ((torch.rand(10, 10, 10), [-3, 1], True),), - ((torch.rand(1, 1, 5, 8), 1, False),), - ((torch.rand(1, 2, 3, 4), 3, True),), - ((torch.rand(1, 2, 8, 8), [2, 3, 0], True),), - ] - - test_parameters_u55: list[Tuple[exampledata_t]] = [ - ((torch.rand(10), 0, True),), - ((torch.rand(10, 10), 1, False),), - ((torch.rand(1, 2, 3, 4), 3, True),), - ((torch.rand(10, 10, 10), [-3, 1], True),), - ((torch.rand(1, 1, 5, 8), 1, False),), - ((torch.rand(1, 2, 8, 8), [2, 3, 0], True),), - ] - - def forward(self, x: torch.Tensor, dim: int, keepdim: bool): - return x.sum(dim=dim, keepdim=keepdim) - - def _test_sum_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: tuple[exampledata_t] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.sum.dim_IntList": 1}) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_sum_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: tuple[exampledata_t] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check_count({"torch.ops.aten.sum.dim_IntList": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_sum_ethosu_BI_pipeline( - self, - module: torch.nn.Module, - test_data: tuple[exampledata_t], - compile_spec: CompileSpec, - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.sum.dim_IntList": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(inputs=test_data, qtol=1) - - @parameterized.expand(Sum.test_parameters) - def test_sum_tosa_MI(self, test_data: tuple[exampledata_t]): - self._test_sum_tosa_MI_pipeline(self.Sum(), test_data) - - @parameterized.expand(Sum.test_parameters) - def test_sum_tosa_BI(self, test_data: tuple[exampledata_t]): - self._test_sum_tosa_BI_pipeline(self.Sum(), test_data) - - @parameterized.expand(Sum.test_parameters_u55) - def test_sum_u55_BI(self, test_data: tuple[exampledata_t]): - self._test_sum_ethosu_BI_pipeline( - self.Sum(), - test_data, - common.get_u55_compile_spec(), - ) - - @parameterized.expand(Sum.test_parameters) - def test_sum_u85_BI(self, test_data: tuple[exampledata_t]): - self._test_sum_ethosu_BI_pipeline( - self.Sum(), - test_data, - common.get_u85_compile_spec(), - ) - - reject_inputs = [ - ((torch.rand((65537, 1, 1)), 0, False),), - ((torch.rand((800, 90, 1)), 2, False),), - ((torch.rand((3, 2, 800, 90)), 1, False),), - ] - - @parameterized.expand(reject_inputs) - def test_reject_sum_u55_BI(self, example_inputs): - ( - ArmTester( - TestSum.Sum(), - example_inputs=example_inputs, - compile_spec=common.get_u55_compile_spec(), - ) - .quantize() - .export() 
- .check_count({"torch.ops.aten.sum.dim_IntList": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge_transform_and_lower() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 0}) - .check(["executorch_exir_dialects_edge__ops_aten_sum_dim_IntList"]) - ) +from executorch.backends.arm.test import common + +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +aten_op = "torch.ops.aten.sum.dim_IntList" +input_t1 = Tuple[torch.Tensor] # Input x + + +"""Tests sum which sums all elements along some specified dimensions. +keepdim specifies whether the dimension that is summed should +be squeezed or not. +""" + + +class Sum(torch.nn.Module): + test_parameters = { + "1d_dim_0_keep": lambda: (torch.rand(10), 0, True), + "2d_dim_1_no_keep": lambda: (torch.rand(10, 10), 1, False), + "3d_dims_keep": lambda: (torch.rand(10, 10, 10), [-3, 1], True), + "4d_dims_no_keep": lambda: (torch.rand(1, 1, 5, 8), 1, False), + "4d_dim_3_keep": lambda: (torch.rand(1, 2, 3, 4), 3, True), + "4d_dims_keep": lambda: (torch.rand(1, 2, 8, 8), [2, 3, 0], True), + } + + def forward(self, x: torch.Tensor, dim: int, keepdim: bool): + return x.sum(dim=dim, keepdim=keepdim) + + +@common.parametrize("test_data", Sum.test_parameters) +def test_sum_dim_intlist_tosa_MI(test_data: input_t1): + pipeline = TosaPipelineMI[input_t1]( + Sum(), + test_data(), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", Sum.test_parameters) +def test_sum_dim_intlist_tosa_BI(test_data: input_t1): + pipeline = TosaPipelineBI[input_t1]( + Sum(), + test_data(), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", Sum.test_parameters) +@common.XfailIfNoCorstone300 +def test_view_u55_BI_1_0(test_data: Tuple): + pipeline = EthosU55PipelineBI[input_t1]( + Sum(), + test_data(), + aten_op, + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +@common.parametrize("test_data", Sum.test_parameters) +@common.XfailIfNoCorstone320 +def test_view_u85_BI_1_0(test_data: Tuple): + pipeline = EthosU85PipelineBI[input_t1]( + Sum(), + test_data(), + aten_op, + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() + + +reject_inputs = { + "reject_large_0_dim": lambda: (torch.rand((65537, 1, 1)), 0, False), + "reject_large_2_dim": lambda: (torch.rand((800, 90, 1)), 2, False), + "reject_large_1_dim": lambda: (torch.rand((3, 2, 800, 90)), 1, False), +} + + +@common.parametrize("test_data", reject_inputs) +def test_view_u55_BI_failure_set(test_data: Tuple): + pipeline = EthosU55PipelineBI[input_t1]( + Sum(), + test_data(), + aten_op, + exir_ops=[], + ) + pipeline.pop_stage("check_count.exir") + pipeline.run() diff --git a/backends/arm/test/ops/test_tanh.py b/backends/arm/test/ops/test_tanh.py index 8d13620dc4a..73d51cb8c3e 100644 --- a/backends/arm/test/ops/test_tanh.py +++ b/backends/arm/test/ops/test_tanh.py @@ -1,142 +1,85 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. -# Copyright 2024 Arm Limited and/or its affiliates. -# All rights reserved. +# Copyright 2024-2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
-import unittest - from typing import Tuple -import pytest - import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized +from executorch.backends.arm.test import common +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) +aten_op = "torch.ops.aten.tanh.default" +input_t1 = Tuple[torch.Tensor] # Input x -test_data_suite = [ +test_data_suite = { # (test_name, test_data) - ("zeros", torch.zeros(10, 10, 10, 10)), - ("ones", torch.ones(10, 10, 10)), - ("rand", torch.rand(10, 10) - 0.5), - ("randn_pos", torch.randn(10) + 10), - ("randn_neg", torch.randn(10) - 10), - ("ramp", torch.arange(-16, 16, 0.2)), -] - - -class TestTanh(unittest.TestCase): - class Tanh(torch.nn.Module): - def __init__(self): - super().__init__() - self.tanh = torch.nn.Tanh() - - def forward(self, x): - return self.tanh(x) - - def _test_tanh_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check(["torch.ops.aten.tanh.default"]) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_tanh_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - - if conftest.is_option_enabled("tosa_ref_model"): - tester.run_method_and_compare_outputs(inputs=test_data) - - def _test_tanh_tosa_BI_pipeline(self, module: torch.nn.Module, test_data: Tuple): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check(["torch.ops.aten.tanh.default"]) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_tanh_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - - if conftest.is_option_enabled("tosa_ref_model"): - tester.run_method_and_compare_outputs(inputs=test_data) - - def _test_tanh_tosa_ethos_BI_pipeline( - self, - compile_spec: list[CompileSpec], - module: torch.nn.Module, - test_data: Tuple[torch.tensor], - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.tanh.default": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_not(["executorch_exir_dialects_edge__ops_aten_tanh_default"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - - def _test_tanh_tosa_u55_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - self._test_tanh_tosa_ethos_BI_pipeline( - common.get_u55_compile_spec(), module, test_data - ) - - def _test_tanh_tosa_u85_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.tensor] - ): - self._test_tanh_tosa_ethos_BI_pipeline( - common.get_u85_compile_spec(), module, test_data - ) - - @parameterized.expand(test_data_suite) - @pytest.mark.tosa_ref_model - def test_tanh_tosa_MI( - self, - test_name: str, - test_data: torch.Tensor, - ): - self._test_tanh_tosa_MI_pipeline(self.Tanh(), (test_data,)) - - 
@parameterized.expand(test_data_suite) - @pytest.mark.tosa_ref_model - def test_tanh_tosa_BI(self, test_name: str, test_data: torch.Tensor): - self._test_tanh_tosa_BI_pipeline(self.Tanh(), (test_data,)) - - @parameterized.expand(test_data_suite) - def test_tanh_tosa_u55_BI(self, test_name: str, test_data: torch.Tensor): - self._test_tanh_tosa_u55_BI_pipeline(self.Tanh(), (test_data,)) - - @parameterized.expand(test_data_suite) - def test_tanh_tosa_u85_BI(self, test_name: str, test_data: torch.Tensor): - self._test_tanh_tosa_u85_BI_pipeline(self.Tanh(), (test_data,)) + "zeros": lambda: torch.zeros(10, 10, 10, 10), + "ones": lambda: torch.ones(10, 10, 10), + "rand": lambda: torch.rand(10, 10) - 0.5, + "randn_pos": lambda: torch.randn(10) + 10, + "randn_neg": lambda: torch.randn(10) - 10, + "ramp": lambda: torch.arange(-16, 16, 0.2), +} + + +class Tanh(torch.nn.Module): + def __init__(self): + super().__init__() + self.tanh = torch.nn.Tanh() + + def forward(self, x): + return self.tanh(x) + + +@common.parametrize("test_data", test_data_suite) +def test_tanh_tosa_MI(test_data: Tuple): + pipeline = TosaPipelineMI[input_t1]( + Tanh(), + (test_data(),), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_tanh_tosa_BI(test_data: Tuple): + pipeline = TosaPipelineBI[input_t1]( + Tanh(), + (test_data(),), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_tanh_u55_BI(test_data: Tuple): + pipeline = EthosU55PipelineBI[input_t1]( + Tanh(), + (test_data(),), + aten_op, + exir_ops=[], + run_on_fvp=False, + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_tanh_u85_BI(test_data: Tuple): + pipeline = EthosU85PipelineBI[input_t1]( + Tanh(), + (test_data(),), + aten_op, + exir_ops=[], + run_on_fvp=False, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_to_copy.py b/backends/arm/test/ops/test_to_copy.py index db3e93fbdc9..9d873f30ce9 100644 --- a/backends/arm/test/ops/test_to_copy.py +++ b/backends/arm/test/ops/test_to_copy.py @@ -1,5 +1,4 @@ # Copyright 2024-2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. @@ -8,14 +7,14 @@ # Tests the _to_copy op which is interpreted as a cast for our purposes. # -import unittest +from typing import Tuple import torch from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester +from executorch.backends.arm.test.tester.test_pipeline import TosaPipelineMI -from parameterized import parameterized +input_t1 = Tuple[torch.Tensor] # Input x class Cast(torch.nn.Module): @@ -27,41 +26,41 @@ def forward(self, x: torch.Tensor): return x.to(dtype=self.target_dtype) -class TestToCopy(unittest.TestCase): - """ - Tests the _to_copy operation. +""" +Tests the _to_copy operation. - Only test unquantized graphs as explicit casting of dtypes messes with the - quantization. +Only test unquantized graphs as explicit casting of dtypes messes with the +quantization. - Note: This is also covered by test_scalars.py. - """ +Note: This is also covered by test_scalars.py. 
+""" + +_TO_COPY_TEST_DATA = { + "rand_fp16": lambda: (torch.rand((1, 2, 3, 4), dtype=torch.float16), torch.float32), + "rand_fp32": lambda: (torch.rand((1, 2, 3, 4), dtype=torch.float32), torch.float16), + "rand_int8": lambda: ( + torch.randint(-127, 128, (1, 2, 3, 4), dtype=torch.int8), + torch.float32, + ), + "rand_int8_int32": lambda: ( + torch.randint(-127, 128, (1, 2, 3, 4), dtype=torch.int8), + torch.int32, + ), + "rand_int32": lambda: ( + torch.randint(-127, 128, (1, 2, 3, 4), dtype=torch.int32), + torch.int8, + ), +} - _TO_COPY_TEST_DATA = ( - (torch.rand((1, 2, 3, 4), dtype=torch.float16), torch.float32), - (torch.rand((1, 2, 3, 4), dtype=torch.float32), torch.float16), - (torch.randint(-127, 128, (1, 2, 3, 4), dtype=torch.int8), torch.float32), - (torch.randint(-127, 128, (1, 2, 3, 4), dtype=torch.int8), torch.int32), - (torch.randint(-127, 128, (1, 2, 3, 4), dtype=torch.int32), torch.int8), - ) - def _test_to_copy_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: torch.Tensor - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - @parameterized.expand(_TO_COPY_TEST_DATA) - def test_view_tosa_MI(self, test_tensor: torch.Tensor, new_dtype): - self._test_to_copy_tosa_MI_pipeline(Cast(new_dtype), (test_tensor,)) +@common.parametrize("test_data", _TO_COPY_TEST_DATA) +def test_copy_tosa_MI(test_data: Tuple): + test_tensor, new_dtype = test_data() + + pipeline = TosaPipelineMI[input_t1]( + Cast(new_dtype), + (test_tensor,), + aten_op=[], + exir_op=[], + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_unary.py b/backends/arm/test/ops/test_unary.py index 1f91cab56c1..f8359bb4339 100644 --- a/backends/arm/test/ops/test_unary.py +++ b/backends/arm/test/ops/test_unary.py @@ -3,7 +3,6 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
- from typing import Tuple import torch @@ -46,51 +45,51 @@ def forward(self, x: torch.Tensor): test_data = { - "ceil_zeros": ( + "ceil_zeros": lambda: ( Ceil(), zeros, ), - "floor_zeros": ( + "floor_zeros": lambda: ( Floor(), zeros, ), - "ceil_ones": ( + "ceil_ones": lambda: ( Ceil(), ones, ), - "floor_ones": ( + "floor_ones": lambda: ( Floor(), ones, ), - "ceil_rand": ( + "ceil_rand": lambda: ( Ceil(), rand, ), - "floor_rand": ( + "floor_rand": lambda: ( Floor(), rand, ), - "ceil_randn_pos": ( + "ceil_randn_pos": lambda: ( Ceil(), randn_pos, ), - "floor_randn_pos": ( + "floor_randn_pos": lambda: ( Floor(), randn_pos, ), - "ceil_randn_neg": ( + "ceil_randn_neg": lambda: ( Ceil(), randn_neg, ), - "floor_randn_neg": ( + "floor_randn_neg": lambda: ( Floor(), randn_neg, ), - "ceil_ramp": ( + "ceil_ramp": lambda: ( Ceil(), ramp, ), - "floor_ramp": ( + "floor_ramp": lambda: ( Floor(), ramp, ), @@ -99,55 +98,51 @@ def forward(self, x: torch.Tensor): @common.parametrize("test_data", test_data) def test_unary_tosa_MI(test_data: input_t1): - module = test_data[0] + module, test_data = test_data() pipeline = TosaPipelineMI[input_t1]( - module, (test_data[1],), module.aten_op, module.exir_op + module, + (test_data,), + module.aten_op, + module.exir_op, ) pipeline.run() @common.parametrize("test_data", test_data) def test_unary_tosa_BI(test_data: input_t1): - module = test_data[0] + module, test_data = test_data() pipeline = TosaPipelineBI[input_t1]( - module, (test_data[1],), module.aten_op, module.exir_op + module, + (test_data,), + module.aten_op, + module.exir_op, ) pipeline.run() @common.parametrize("test_data", test_data) +@common.XfailIfNoCorstone300 def test_unary_u55_BI(test_data: input_t1): - module = test_data[0] + module, test_data = test_data() pipeline = EthosU55PipelineBI[input_t1]( - module, (test_data[1],), module.aten_op, module.exir_op, run_on_fvp=False + module, + (test_data,), + module.aten_op, + module.exir_op, + run_on_fvp=True, ) pipeline.run() @common.parametrize("test_data", test_data) +@common.XfailIfNoCorstone320 def test_unary_u85_BI(test_data: input_t1): - module = test_data[0] - pipeline = EthosU85PipelineBI[input_t1]( - module, (test_data[1],), module.aten_op, module.exir_op, run_on_fvp=False - ) - pipeline.run() - - -@common.parametrize("test_data", test_data) -@common.SkipIfNoCorstone300 -def test_unary_u55_BI_on_fvp(test_data: input_t1): - module = test_data[0] - pipeline = EthosU55PipelineBI[input_t1]( - module, (test_data[1],), module.aten_op, module.exir_op, run_on_fvp=True - ) - pipeline.run() - - -@common.parametrize("test_data", test_data) -@common.SkipIfNoCorstone320 -def test_unary_u85_BI_on_fvp(test_data: input_t1): - module = test_data[0] + module, test_data = test_data() pipeline = EthosU85PipelineBI[input_t1]( - module, (test_data[1],), module.aten_op, module.exir_op, run_on_fvp=True + module, + (test_data,), + module.aten_op, + module.exir_op, + run_on_fvp=True, ) pipeline.run() diff --git a/backends/arm/test/ops/test_unsqueeze.py b/backends/arm/test/ops/test_unsqueeze.py index 68f4fe46123..4ad238a099a 100644 --- a/backends/arm/test/ops/test_unsqueeze.py +++ b/backends/arm/test/ops/test_unsqueeze.py @@ -1,5 +1,4 @@ # Copyright 2024-2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
@@ -8,105 +7,76 @@ # Tests the unsqueeze op which copies the data of the input tensor (possibly with new data format) # -import unittest from typing import Sequence, Tuple import torch from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester - -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - - -class TestSimpleUnsqueeze(unittest.TestCase): - class Unsqueeze(torch.nn.Module): - shapes: list[int | Sequence[int]] = [5, (5, 5), (5, 4), (5, 4, 3)] - test_parameters: list[tuple[torch.Tensor]] = [(torch.randn(n),) for n in shapes] - - def forward(self, x: torch.Tensor, dim): - return x.unsqueeze(dim) - - def _test_unsqueeze_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor, int] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.unsqueeze.default": 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) - def _test_unsqueeze_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor, int] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check_count({"torch.ops.aten.unsqueeze.default": 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, qtol=1) - ) +aten_op = "torch.ops.aten.unsqueeze.default" +input_t1 = Tuple[torch.Tensor, torch.scalar_tensor] # Input x, Input y - def _test_unsqueeze_ethosu_BI_pipeline( - self, - compile_spec: CompileSpec, - module: torch.nn.Module, - test_data: Tuple[torch.Tensor, int], - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.unsqueeze.default": 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - @parameterized.expand(Unsqueeze.test_parameters) - def test_unsqueeze_tosa_MI(self, test_tensor: torch.Tensor): - for i in range(-test_tensor.dim() - 1, test_tensor.dim() + 1): - self._test_unsqueeze_tosa_MI_pipeline(self.Unsqueeze(), (test_tensor, i)) - - @parameterized.expand(Unsqueeze.test_parameters) - def test_unsqueeze_tosa_BI(self, test_tensor: torch.Tensor): - self._test_unsqueeze_tosa_BI_pipeline(self.Unsqueeze(), (test_tensor, 0)) - - @parameterized.expand(Unsqueeze.test_parameters) - def test_unsqueeze_u55_BI(self, test_tensor: torch.Tensor): - self._test_unsqueeze_ethosu_BI_pipeline( - common.get_u55_compile_spec(), - self.Unsqueeze(), - (test_tensor, 0), - ) +class Unsqueeze(torch.nn.Module): + shapes: list[int | Sequence[int]] = [5, (5, 5), (5, 4), (5, 4, 3)] + test_parameters = {} + for n in shapes: + test_parameters[f"rand_{n}"] = (torch.randn(n),) + + def forward(self, x: torch.Tensor, dim): + return x.unsqueeze(dim) + - @parameterized.expand(Unsqueeze.test_parameters) - def test_unsqueeze_u85_BI(self, test_tensor: torch.Tensor): - self._test_unsqueeze_ethosu_BI_pipeline( - common.get_u85_compile_spec(), - 
self.Unsqueeze(), - (test_tensor, 0), +@common.parametrize("test_tensor", Unsqueeze.test_parameters) +def test_unsqueeze_tosa_MI(test_tensor: torch.Tensor): + for i in range(-test_tensor[0].dim() - 1, test_tensor[0].dim() + 1): + pipeline = TosaPipelineMI[input_t1]( + Unsqueeze(), + (*test_tensor, i), + aten_op, + exir_op=[], ) + pipeline.run() + + +@common.parametrize("test_tensor", Unsqueeze.test_parameters) +def test_unsqueeze_tosa_BI(test_tensor: torch.Tensor): + pipeline = TosaPipelineBI[input_t1]( + Unsqueeze(), + (*test_tensor, 0), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_tensor", Unsqueeze.test_parameters) +@common.XfailIfNoCorstone300 +def test_unsqueeze_u55_BI(test_tensor: torch.Tensor): + pipeline = EthosU55PipelineBI[input_t1]( + Unsqueeze(), + (*test_tensor, 0), + aten_op, + exir_ops=[], + run_on_fvp=False, + ) + pipeline.run() + + +@common.parametrize("test_tensor", Unsqueeze.test_parameters) +@common.XfailIfNoCorstone320 +def test_unsqueeze_u85_BI(test_tensor: torch.Tensor): + pipeline = EthosU85PipelineBI[input_t1]( + Unsqueeze(), + (*test_tensor, 0), + aten_op, + exir_ops=[], + run_on_fvp=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_upsample_nearest2d.py b/backends/arm/test/ops/test_upsample_nearest2d.py index 8984d716a3d..7809d5fdee2 100644 --- a/backends/arm/test/ops/test_upsample_nearest2d.py +++ b/backends/arm/test/ops/test_upsample_nearest2d.py @@ -1,165 +1,163 @@ -# Copyright 2024 Arm Limited and/or its affiliates. -# All rights reserved. +# Copyright 2024-2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest - from typing import Optional, Tuple import torch from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from parameterized import parameterized +from executorch.backends.arm.test.tester.test_pipeline import ( + TosaPipelineBI, + TosaPipelineMI, +) + +aten_op = "torch.ops.aten.upsample_nearest2d.vec" +input_t1 = Tuple[torch.Tensor] # Input x -test_data_suite = [ +test_data_suite = { # (test_name, test_data, size, scale_factor, compare_outputs) - ("rand_double_scale", torch.rand(2, 4, 8, 3), None, 2.0, True), - ("rand_double_scale_one_dim", torch.rand(2, 4, 8, 3), None, (1.0, 2.0), True), - ("rand_double_size", torch.rand(2, 4, 8, 3), (16, 6), None, True), - ("rand_one_double_scale", torch.rand(2, 4, 1, 1), None, 2.0, True), - ("rand_one_double_size", torch.rand(2, 4, 1, 1), (2, 2), None, True), - ("rand_one_same_scale", torch.rand(2, 4, 1, 1), None, 1.0, True), - ("rand_one_same_size", torch.rand(2, 4, 1, 1), (1, 1), None, True), + "rand_double_scale": lambda: (torch.rand(2, 4, 8, 3), None, 2.0, True), + "rand_double_scale_one_dim": lambda: ( + torch.rand(2, 4, 8, 3), + None, + (1.0, 2.0), + True, + ), + "rand_double_size": lambda: (torch.rand(2, 4, 8, 3), (16, 6), None, True), + "rand_one_double_scale": lambda: (torch.rand(2, 4, 1, 1), None, 2.0, True), + "rand_one_double_size": lambda: (torch.rand(2, 4, 1, 1), (2, 2), None, True), + "rand_one_same_scale": lambda: (torch.rand(2, 4, 1, 1), None, 1.0, True), + "rand_one_same_size": lambda: (torch.rand(2, 4, 1, 1), (1, 1), None, True), # Can't compare outputs as the rounding when selecting the nearest pixel is # different between PyTorch and TOSA. Just check the legalization went well. 
# TODO Improve the test infrastructure to support more in depth verification # of the TOSA legalization results. - ("rand_half_scale", torch.rand(2, 4, 8, 6), None, 0.5, False), - ("rand_half_size", torch.rand(2, 4, 8, 6), (4, 3), None, False), - ("rand_one_and_half_scale", torch.rand(2, 4, 8, 3), None, 1.5, False), - ("rand_one_and_half_size", torch.rand(2, 4, 8, 3), (12, 4), None, False), -] - - -class TestUpsampleNearest2d(unittest.TestCase): - class UpsamplingNearest2d(torch.nn.Module): - def __init__( - self, - size: Optional[Tuple[int]], - scale_factor: Optional[float | Tuple[float]], - ): - super().__init__() - self.upsample = torch.nn.UpsamplingNearest2d( # noqa: TOR101 - size=size, scale_factor=scale_factor - ) - - def forward(self, x): - return self.upsample(x) - - class Upsample(torch.nn.Module): - def __init__( - self, - size: Optional[Tuple[int]], - scale_factor: Optional[float | Tuple[float]], - ): - super().__init__() - self.upsample = torch.nn.Upsample( - size=size, scale_factor=scale_factor, mode="nearest" - ) - - def forward(self, x): - return self.upsample(x) - - class Interpolate(torch.nn.Module): - def __init__( - self, - size: Optional[Tuple[int]], - scale_factor: Optional[float | Tuple[float]], - ): - super().__init__() - self.upsample = lambda x: torch.nn.functional.interpolate( - x, size=size, scale_factor=scale_factor, mode="nearest" - ) - - def forward(self, x): - return self.upsample(x) - - def _test_upsample_nearest_2d_tosa_MI_pipeline( - self, - module: torch.nn.Module, - test_data: Tuple[torch.tensor], - compare_outputs: bool, - ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check(["torch.ops.aten.upsample_nearest2d.vec"]) - .check_not(["torch.ops.quantized_decomposed"]) - .to_edge_transform_and_lower() - .check_not(["torch.ops.aten.upsample_nearest2d.vec"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) + "rand_half_scale": lambda: (torch.rand(2, 4, 8, 6), None, 0.5, False), + "rand_half_size": lambda: (torch.rand(2, 4, 8, 6), (4, 3), None, False), + "rand_one_and_half_scale": lambda: (torch.rand(2, 4, 8, 3), None, 1.5, False), + "rand_one_and_half_size": lambda: (torch.rand(2, 4, 8, 3), (12, 4), None, False), +} - if compare_outputs: - tester.run_method_and_compare_outputs(inputs=test_data) - def _test_upsample_nearest_2d_tosa_BI_pipeline( +class UpsamplingNearest2d(torch.nn.Module): + def __init__( self, - module: torch.nn.Module, - test_data: Tuple[torch.tensor], - compare_outputs: bool, + size: Optional[Tuple[int]], + scale_factor: Optional[float | Tuple[float]], ): - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check(["torch.ops.aten.upsample_nearest2d.vec"]) - .check(["torch.ops.quantized_decomposed"]) - .to_edge_transform_and_lower() - .check_not(["torch.ops.aten.upsample_nearest2d.vec"]) - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() + super().__init__() + self.upsample = torch.nn.UpsamplingNearest2d( # noqa: TOR101 + size=size, scale_factor=scale_factor ) - if compare_outputs: - tester.run_method_and_compare_outputs(inputs=test_data) + def forward(self, x): + return self.upsample(x) + - @parameterized.expand(test_data_suite) - def test_upsample_nearest_2d_tosa_MI( +class Upsample(torch.nn.Module): + def __init__( self, - test_name: str, - test_data: 
torch.Tensor, size: Optional[Tuple[int]], scale_factor: Optional[float | Tuple[float]], - compare_outputs: bool, ): - self._test_upsample_nearest_2d_tosa_MI_pipeline( - self.UpsamplingNearest2d(size, scale_factor), (test_data,), compare_outputs - ) - self._test_upsample_nearest_2d_tosa_MI_pipeline( - self.Upsample(size, scale_factor), (test_data,), compare_outputs - ) - self._test_upsample_nearest_2d_tosa_MI_pipeline( - self.Interpolate(size, scale_factor), (test_data,), compare_outputs + super().__init__() + self.upsample = torch.nn.Upsample( + size=size, scale_factor=scale_factor, mode="nearest" ) - @parameterized.expand(test_data_suite) - def test_upsample_nearest_2d_tosa_BI( + def forward(self, x): + return self.upsample(x) + + +class Interpolate(torch.nn.Module): + def __init__( self, - test_name: str, - test_data: torch.Tensor, size: Optional[Tuple[int]], scale_factor: Optional[float | Tuple[float]], - compare_outputs: bool, ): - self._test_upsample_nearest_2d_tosa_BI_pipeline( - self.UpsamplingNearest2d(size, scale_factor), (test_data,), compare_outputs - ) - self._test_upsample_nearest_2d_tosa_BI_pipeline( - self.Upsample(size, scale_factor), (test_data,), compare_outputs - ) - self._test_upsample_nearest_2d_tosa_BI_pipeline( - self.Interpolate(size, scale_factor), (test_data,), compare_outputs + super().__init__() + self.upsample = lambda x: torch.nn.functional.interpolate( + x, size=size, scale_factor=scale_factor, mode="nearest" ) + + def forward(self, x): + return self.upsample(x) + + +@common.parametrize("test_data", test_data_suite) +def test_upsample_nearest2d_vec_tosa_MI(test_data: torch.Tensor): + test_data, size, scale_factor, compare_outputs = test_data() + + pipeline = TosaPipelineMI[input_t1]( + UpsamplingNearest2d(size, scale_factor), + (test_data,), + aten_op, + exir_op=[], + ) + if not compare_outputs: + pipeline.pop_stage(-1) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_upsample_nearest2d_vec_tosa_MI_nearest(test_data: torch.Tensor): + test_data, size, scale_factor, compare_outputs = test_data() + + pipeline = TosaPipelineMI[input_t1]( + Upsample(size, scale_factor), + (test_data,), + aten_op, + exir_op=[], + ) + if not compare_outputs: + pipeline.pop_stage(-1) + + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_upsample_nearest2d_vec_tosa_MI_interpolate(test_data: torch.Tensor): + test_data, size, scale_factor, compare_outputs = test_data() + + pipeline = TosaPipelineMI[input_t1]( + Interpolate(size, scale_factor), + (test_data,), + aten_op, + exir_op=[], + ) + if not compare_outputs: + pipeline.pop_stage(-1) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_upsample_nearest2d_vec_tosa_BI_interpolate(test_data: torch.Tensor): + test_data, size, scale_factor, compare_outputs = test_data() + + pipeline = TosaPipelineBI[input_t1]( + UpsamplingNearest2d(size, scale_factor), + (test_data,), + aten_op, + exir_op=[], + ) + if not compare_outputs: + pipeline.pop_stage(-1) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +def test_upsample_nearest2d_vec_tosa_BI_nearest(test_data: torch.Tensor): + test_data, size, scale_factor, compare_outputs = test_data() + + pipeline = TosaPipelineBI[input_t1]( + Upsample(size, scale_factor), + (test_data,), + aten_op, + exir_op=[], + ) + if not compare_outputs: + pipeline.pop_stage(-1) + + pipeline.run() diff --git a/backends/arm/test/ops/test_var.py b/backends/arm/test/ops/test_var.py index 
fb23f24307e..63681263fab 100644 --- a/backends/arm/test/ops/test_var.py +++ b/backends/arm/test/ops/test_var.py @@ -1,255 +1,321 @@ # Copyright 2024-2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -# -# Tests the mean op which changes the size of a Tensor without changing the underlying data. -# -import unittest +from typing import Tuple import torch -from executorch.backends.arm.quantizer import ( - EthosUQuantizer, - get_symmetric_quantization_config, - TOSAQuantizer, + +from executorch.backends.arm.test import common +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, ) -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester - -from executorch.backends.arm.tosa_specification import TosaSpecification -from executorch.backends.xnnpack.test.tester.tester import Quantize -from executorch.exir.backend.backend_details import CompileSpec - -from parameterized import parameterized - - -class TestVar(unittest.TestCase): - - class Var(torch.nn.Module): - test_parameters = [ - (torch.randn(1, 50, 10, 20), True, 0), - (torch.rand(1, 50, 10), False, 0), - (torch.randn(1, 30, 15, 20), True, 1), - (torch.rand(1, 50, 10, 20), False, 0.5), - ] - - def __init__(self, keepdim: bool = True, correction: int = 0): - super().__init__() - self.keepdim = keepdim - self.correction = correction - - def forward( - self, - x: torch.Tensor, - ): - return x.var(keepdim=self.keepdim, correction=self.correction) - - class VarDim(torch.nn.Module): - test_parameters = [ - (torch.randn(1, 50, 10, 20), 1, True, False), - (torch.rand(1, 50, 10), -2, False, False), - (torch.randn(1, 30, 15, 20), -3, True, True), - (torch.rand(1, 50, 10, 20), -1, False, True), - ] - - test_parameters_u55 = [ - (torch.randn(1, 50, 10, 20), 1, True, False), - (torch.randn(1, 30, 15, 20), -3, True, True), - ] - - test_parameters_u55_xfails = [ - (torch.rand(1, 50, 10), -2, True, False), - (torch.rand(1, 50, 10, 20), -1, True, True), - ] - - def __init__(self, dim: int = -1, keepdim: bool = True, unbiased: bool = False): - super().__init__() - self.dim = dim - self.keepdim = keepdim - self.unbiased = unbiased - - def forward( - self, - x: torch.Tensor, - ): - return x.var(dim=self.dim, keepdim=self.keepdim, unbiased=self.unbiased) - - class VarCorrection(torch.nn.Module): - test_parameters = [ - (torch.randn(1, 50, 10, 20), (-1, -2), True, 0), - (torch.rand(1, 50, 10), (-2), True, 0), - (torch.randn(1, 30, 15, 20), (-1, -2, -3), True, 1), - (torch.rand(1, 50, 10, 20), (-1, -2), True, 0.5), - ] - - def __init__( - self, dim: int = -1, keepdim: bool = True, correction: bool = False - ): - super().__init__() - self.dim = dim - self.keepdim = keepdim - self.correction = correction - - def forward( - self, - x: torch.Tensor, - ): - return x.var(dim=self.dim, keepdim=self.keepdim, correction=self.correction) - - def _test_var_tosa_MI_pipeline( + +input_t1 = Tuple[torch.Tensor] # Input x + + +class Var(torch.nn.Module): + test_parameters = { + "var_4d_keep_dim_0_correction": lambda: (torch.randn(1, 50, 10, 20), True, 0), + "var_3d_no_keep_dim_0_correction": lambda: (torch.rand(1, 50, 10), False, 0), + "var_4d_keep_dim_1_correction": lambda: (torch.randn(1, 30, 15, 20), True, 1), + "var_4d_no_keep_dim_0_5_correction": lambda: ( + torch.rand(1, 
50, 10, 20), + False, + 0.5, + ), + } + + def __init__(self, keepdim: bool = True, correction: int = 0): + super().__init__() + self.keepdim = keepdim + self.correction = correction + + def forward( self, - module: torch.nn.Module, - test_data: torch.Tensor, - target_str: str = None, + x: torch.Tensor, ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_var_tosa_BI_pipeline( + return x.var(keepdim=self.keepdim, correction=self.correction) + + +class VarDim(torch.nn.Module): + test_parameters = { + "var_4d_dim_1_keep_dim_unbiased": lambda: ( + torch.randn(1, 50, 10, 20), + 1, + True, + False, + ), + "var_3d_dim_neg_2_no_keep_dim_unbiased": lambda: ( + torch.rand(1, 50, 10), + -2, + False, + False, + ), + "var_3d_dim_neg_3_keep_dim_biased": lambda: ( + torch.randn(1, 30, 15, 20), + -3, + True, + True, + ), + "var_3d_dim_neg_1_no_keep_dim_biased": lambda: ( + torch.rand(1, 50, 10, 20), + -1, + False, + True, + ), + } + + test_parameters_u55 = { + "var_4d_dim_1_keep_dim_unbiased": lambda: ( + torch.randn(1, 50, 10, 20), + 1, + True, + False, + ), + "var_4d_dim_neg_3_keep_dim_biased": lambda: ( + torch.randn(1, 30, 15, 20), + -3, + True, + True, + ), + } + + test_parameters_u55_xfails = { + "var_3d_dim_neg_2_keep_dim_unbiased": lambda: ( + torch.rand(1, 50, 10), + -2, + True, + False, + ), + "var_3d_dim_neg_1_keep_dim_biased": lambda: ( + torch.rand(1, 50, 10, 20), + -1, + True, + True, + ), + } + + def __init__(self, dim: int = -1, keepdim: bool = True, unbiased: bool = False): + super().__init__() + self.dim = dim + self.keepdim = keepdim + self.unbiased = unbiased + + def forward( self, - module: torch.nn.Module, - test_data: torch.Tensor, - target_str: str = None, + x: torch.Tensor, ): - tosa_spec = TosaSpecification.create_from_string("TOSA-0.80+BI") - compile_spec = common.get_tosa_compile_spec(tosa_spec) - quantizer = TOSAQuantizer(tosa_spec).set_io(get_symmetric_quantization_config()) - ( - ArmTester(module, example_inputs=test_data, compile_spec=compile_spec) - .quantize(Quantize(quantizer, get_symmetric_quantization_config())) - .export() - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, qtol=1) - ) - - def _test_var_ethosu_BI_pipeline( + return x.var(dim=self.dim, keepdim=self.keepdim, unbiased=self.unbiased) + + +class VarCorrection(torch.nn.Module): + test_parameters = { + "var_4d_dims_keep_dim_0_correction": lambda: ( + torch.randn(1, 50, 10, 20), + (-1, -2), + True, + 0, + ), + "var_3d_dims_keep_dim_0_correction": lambda: ( + torch.rand(1, 50, 10), + (-2), + True, + 0, + ), + "var_4d_dims_keep_dim_1_correction": lambda: ( + torch.randn(1, 30, 15, 20), + (-1, -2, -3), + True, + 1, + ), + "var_4d_dims_keep_dim_0_5_correction": lambda: ( + torch.rand(1, 50, 10, 20), + (-1, -2), + True, + 0.5, + ), + } + + def __init__(self, dim: int = -1, keepdim: bool = True, correction: bool = False): + super().__init__() + self.dim = dim + self.keepdim = keepdim + self.correction = correction + + def forward( self, - module: torch.nn.Module, - compile_spec: CompileSpec, - test_data: torch.Tensor, - target_str: str = None, - ): - quantizer = EthosUQuantizer(compile_spec).set_io( - 
get_symmetric_quantization_config() - ) - tester = ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize(Quantize(quantizer, get_symmetric_quantization_config())) - .export() - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs(inputs=test_data, qtol=1) - - @parameterized.expand(Var.test_parameters) - def test_var_tosa_MI(self, test_tensor: torch.Tensor, keepdim, correction): - self._test_var_tosa_MI_pipeline(self.Var(keepdim, correction), (test_tensor,)) - - @parameterized.expand(Var.test_parameters) - def test_var_tosa_BI(self, test_tensor: torch.Tensor, keepdim, correction): - self._test_var_tosa_BI_pipeline(self.Var(keepdim, correction), (test_tensor,)) - - @parameterized.expand(Var.test_parameters) - def test_var_u55_BI(self, test_tensor: torch.Tensor, keepdim, correction): - self._test_var_ethosu_BI_pipeline( - self.Var(keepdim, correction), - common.get_u55_compile_spec(), - (test_tensor,), - ) - - @parameterized.expand(Var.test_parameters) - def test_var_u85_BI(self, test_tensor: torch.Tensor, keepdim, correction): - self._test_var_ethosu_BI_pipeline( - self.Var(keepdim, correction), - common.get_u85_compile_spec(), - (test_tensor,), - ) - - @parameterized.expand(VarDim.test_parameters) - def test_var_dim_tosa_MI(self, test_tensor: torch.Tensor, dim, keepdim, unbiased): - self._test_var_tosa_MI_pipeline( - self.VarDim(dim, keepdim, unbiased), (test_tensor,) - ) - - @parameterized.expand(VarDim.test_parameters) - def test_var_dim_tosa_BI(self, test_tensor: torch.Tensor, dim, keepdim, unbiased): - self._test_var_tosa_BI_pipeline( - self.VarDim(dim, keepdim, unbiased), (test_tensor,) - ) - - @parameterized.expand(VarDim.test_parameters_u55) - def test_var_dim_u55_BI(self, test_tensor: torch.Tensor, dim, keepdim, unbiased): - self._test_var_ethosu_BI_pipeline( - self.VarDim(dim, keepdim, unbiased), - common.get_u55_compile_spec(), - (test_tensor,), - ) - - @parameterized.expand(VarDim.test_parameters) - def test_var_dim_u85_BI(self, test_tensor: torch.Tensor, dim, keepdim, unbiased): - self._test_var_ethosu_BI_pipeline( - self.VarDim(dim, keepdim, unbiased), - common.get_u85_compile_spec(), - (test_tensor,), - ) - - @parameterized.expand(VarCorrection.test_parameters) - def test_var_correction_tosa_MI( - self, test_tensor: torch.Tensor, dim, keepdim, correction - ): - self._test_var_tosa_MI_pipeline( - self.VarCorrection(dim, keepdim, correction), (test_tensor,) - ) - - @parameterized.expand(VarCorrection.test_parameters) - def test_var_correction_tosa_BI( - self, test_tensor: torch.Tensor, dim, keepdim, correction - ): - self._test_var_tosa_BI_pipeline( - self.VarCorrection(dim, keepdim, correction), (test_tensor,) - ) - - @parameterized.expand(VarCorrection.test_parameters) - def test_var_correction_u55_BI( - self, test_tensor: torch.Tensor, dim, keepdim, correction - ): - self._test_var_ethosu_BI_pipeline( - self.VarCorrection(dim, keepdim, correction), - common.get_u55_compile_spec(), - (test_tensor,), - ) - - @parameterized.expand(VarCorrection.test_parameters) - def test_var_correction_u85_BI( - self, test_tensor: torch.Tensor, dim, keepdim, correction + x: torch.Tensor, ): - self._test_var_ethosu_BI_pipeline( - self.VarCorrection(dim, keepdim, correction), - common.get_u85_compile_spec(), - (test_tensor,), - ) + return x.var(dim=self.dim, keepdim=self.keepdim, 
correction=self.correction) + + +@common.parametrize("test_data", Var.test_parameters) +def test_var_dim_tosa_MI_no_dim(test_data: Tuple): + test_data, keepdim, correction = test_data() + pipeline = TosaPipelineMI[input_t1]( + Var(keepdim, correction), + (test_data,), + aten_op=[], + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", Var.test_parameters) +def test_var_dim_tosa_BI_no_dim(test_data: Tuple): + test_data, keepdim, correction = test_data() + pipeline = TosaPipelineBI[input_t1]( + Var(keepdim, correction), + (test_data,), + aten_op=[], + exir_op=[], + symmetric_io_quantization=True, + ) + pipeline.run() + + +@common.parametrize("test_data", Var.test_parameters) +@common.XfailIfNoCorstone300 +def test_var_dim_u55_BI_no_dim(test_data: Tuple): + test_data, keepdim, correction = test_data() + pipeline = EthosU55PipelineBI[input_t1]( + Var(keepdim, correction), + (test_data,), + aten_ops=[], + exir_ops=[], + run_on_fvp=True, + symmetric_io_quantization=True, + ) + pipeline.run() + + +@common.parametrize("test_data", Var.test_parameters) +@common.XfailIfNoCorstone320 +def test_var_dim_u85_BI_no_dim(test_data: Tuple): + test_data, keepdim, correction = test_data() + pipeline = EthosU85PipelineBI[input_t1]( + Var(keepdim, correction), + (test_data,), + aten_ops=[], + exir_ops=[], + run_on_fvp=True, + symmetric_io_quantization=True, + ) + pipeline.run() + + +@common.parametrize("test_data", VarDim.test_parameters) +def test_var_dim_tosa_MI(test_data: Tuple): + test_data, dim, keepdim, unbiased = test_data() + pipeline = TosaPipelineMI[input_t1]( + VarDim(dim, keepdim, unbiased), + (test_data,), + aten_op=[], + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", VarDim.test_parameters) +def test_var_dim_tosa_BI(test_data: Tuple): + + test_data, dim, keepdim, unbiased = test_data() + pipeline = TosaPipelineBI[input_t1]( + VarDim(dim, keepdim, unbiased), + (test_data,), + aten_op=[], + exir_op=[], + symmetric_io_quantization=True, + ) + pipeline.run() + + +@common.parametrize("test_data", VarDim.test_parameters_u55) +@common.XfailIfNoCorstone300 +def test_var_dim_u55_BI(test_data: Tuple): + test_data, dim, keepdim, unbiased = test_data() + pipeline = EthosU55PipelineBI[input_t1]( + VarDim(dim, keepdim, unbiased), + (test_data,), + aten_ops=[], + exir_ops=[], + run_on_fvp=True, + symmetric_io_quantization=True, + ) + pipeline.run() + + +@common.parametrize("test_data", VarDim.test_parameters) +@common.XfailIfNoCorstone320 +def test_var_dim_u85_BI(test_data: Tuple): + test_data, dim, keepdim, unbiased = test_data() + pipeline = EthosU85PipelineBI[input_t1]( + VarDim(dim, keepdim, unbiased), + (test_data,), + aten_ops=[], + exir_ops=[], + run_on_fvp=True, + symmetric_io_quantization=True, + ) + pipeline.run() + + +@common.parametrize("test_data", VarCorrection.test_parameters) +def test_var_dim_tosa_MI_correction(test_data: Tuple): + test_data, dim, keepdim, correction = test_data() + pipeline = TosaPipelineMI[input_t1]( + VarCorrection(dim, keepdim, correction), + (test_data,), + aten_op=[], + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", VarCorrection.test_parameters) +def test_var_dim_tosa_BI_correction(test_data: Tuple): + test_data, dim, keepdim, correction = test_data() + pipeline = TosaPipelineBI[input_t1]( + VarCorrection(dim, keepdim, correction), + (test_data,), + aten_op=[], + exir_op=[], + symmetric_io_quantization=True, + ) + pipeline.run() + + +@common.parametrize("test_data", VarCorrection.test_parameters) 
+@common.XfailIfNoCorstone300 +def test_var_dim_u55_BI_correction(test_data: Tuple): + test_data, dim, keepdim, correction = test_data() + pipeline = EthosU55PipelineBI[input_t1]( + VarCorrection(dim, keepdim, correction), + (test_data,), + aten_ops=[], + exir_ops=[], + run_on_fvp=True, + symmetric_io_quantization=True, + ) + pipeline.run() + + +@common.parametrize("test_data", VarCorrection.test_parameters) +@common.XfailIfNoCorstone320 +def test_var_dim_u85_BI_correction(test_data: Tuple): + test_data, dim, keepdim, correction = test_data() + pipeline = EthosU85PipelineBI[input_t1]( + VarCorrection(dim, keepdim, correction), + (test_data,), + aten_ops=[], + exir_ops=[], + run_on_fvp=True, + symmetric_io_quantization=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_view.py b/backends/arm/test/ops/test_view.py index f90ae402067..a899be6750d 100644 --- a/backends/arm/test/ops/test_view.py +++ b/backends/arm/test/ops/test_view.py @@ -1,5 +1,4 @@ # Copyright 2024-2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. @@ -8,131 +7,90 @@ # Tests the view op which changes the size of a Tensor without changing the underlying data. # -import unittest from typing import Tuple import torch from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester - -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - - -class TestView(unittest.TestCase): - """Tests the view operation.""" - - class View(torch.nn.Module): - - needs_transpose_tests = [ - (torch.rand(100), (1, -1, 5, 2)), - (torch.rand(10, 2, 1, 5), (1, -1, 5, 2)), - (torch.rand(1, 2, 1, 9), (3, 1, 3, 2)), - (torch.rand(2, 1, 1, 9), (3, 2, 3, 1)), - (torch.rand(2, 50, 2, 1), (1, 200)), - (torch.rand(2, 5, 2, 3), (1, 15, 4)), - ] - - no_transpose_tests = [ - (torch.rand(2, 1, 1, 9), (3, 1, 3, 2)), - (torch.rand(5, 10, 1, 1), (25, 2, 1, 1)), - (torch.rand(10, 2), (1, 1, 5, 4)), - (torch.rand(10, 10), (5, 1, 5, 4)), - (torch.rand(1, 1, 1, 10), (1, 1, 10, 1)), - (torch.rand(1, 1, 5, 10), (1, 1, 50, 1)), - (torch.rand(5, 10, 1, 1), (1, 25, 2)), - (torch.rand(2, 50, 1, 1), (1, 100)), - (torch.rand(2, 3, 2, 3), (2, 3, 3, 2)), - ] - - def forward(self, x: torch.Tensor, new_shape): - return x.view(new_shape) - - def _test_view_tosa_MI_pipeline( - self, module: torch.nn.Module, test_data: torch.Tensor - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.view.default": 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_view_tosa_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .check_count({"torch.ops.aten.view.default": 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data, qtol=1) - ) - - def _test_view_ethos_BI_pipeline( - self, - compile_spec: list[CompileSpec], - module: torch.nn.Module, - test_data: Tuple[torch.Tensor], - ): - ( - 
ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.view.default": 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - ) - - def _test_view_u55_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - self._test_view_ethos_BI_pipeline( - common.get_u55_compile_spec(), module, test_data - ) - - def _test_view_u85_BI_pipeline( - self, module: torch.nn.Module, test_data: Tuple[torch.Tensor] - ): - self._test_view_ethos_BI_pipeline( - common.get_u85_compile_spec(), module, test_data - ) - - @parameterized.expand(View.needs_transpose_tests + View.no_transpose_tests) - def test_view_tosa_MI(self, test_tensor: torch.Tensor, new_shape): - self._test_view_tosa_MI_pipeline(self.View(), (test_tensor, new_shape)) - - @parameterized.expand(View.needs_transpose_tests + View.no_transpose_tests) - def test_view_tosa_BI(self, test_tensor: torch.Tensor, new_shape): - self._test_view_tosa_BI_pipeline(self.View(), (test_tensor, new_shape)) - - @parameterized.expand(View.needs_transpose_tests + View.no_transpose_tests) - def test_view_u55_BI(self, test_tensor: torch.Tensor, new_shape): - self._test_view_u55_BI_pipeline(self.View(), (test_tensor, new_shape)) - - @parameterized.expand(View.needs_transpose_tests + View.no_transpose_tests) - def test_view_u85_BI(self, test_tensor: torch.Tensor, new_shape): - self._test_view_u85_BI_pipeline(self.View(), (test_tensor, new_shape)) +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) + +aten_op = "torch.ops.aten.view.default" + +input_t1 = Tuple[torch.Tensor, torch.Tensor] # Input x, Input y + + +class View(torch.nn.Module): + + needs_transpose_tests = { + "rand_1d_neg": lambda: (torch.rand(100), (1, -1, 5, 2)), + "rand_4d_neg": lambda: (torch.rand(10, 2, 1, 5), (1, -1, 5, 2)), + "rand_4d_4d_small": lambda: (torch.rand(1, 2, 1, 9), (3, 1, 3, 2)), + "rand_4d_4d": lambda: (torch.rand(2, 1, 1, 9), (3, 2, 3, 1)), + "rand_4d_2d": lambda: (torch.rand(2, 50, 2, 1), (1, 200)), + "rand_4d_3d": lambda: (torch.rand(2, 5, 2, 3), (1, 15, 4)), + "rand_4d_1": lambda: (torch.rand(2, 1, 1, 9), (3, 1, 3, 2)), + "rand_4d_2": lambda: (torch.rand(5, 10, 1, 1), (25, 2, 1, 1)), + "rand_4d_2_4": lambda: (torch.rand(10, 2), (1, 1, 5, 4)), + "rand_4d_2_4_big": lambda: (torch.rand(10, 10), (5, 1, 5, 4)), + "rand_4d_4_4": lambda: (torch.rand(1, 1, 1, 10), (1, 1, 10, 1)), + "rand_4d_4_4_big": lambda: (torch.rand(1, 1, 5, 10), (1, 1, 50, 1)), + "rand_4d_4_3": lambda: (torch.rand(5, 10, 1, 1), (1, 25, 2)), + "rand_4d_4_2": lambda: (torch.rand(2, 50, 1, 1), (1, 100)), + "rand_4d_2_4_same": lambda: (torch.rand(2, 3, 2, 3), (2, 3, 3, 2)), + } + + def forward(self, x: torch.Tensor, new_shape): + return x.view(new_shape) + + +@common.parametrize("test_data", View.needs_transpose_tests) +def test_view_tosa_MI(test_data: Tuple): + test_tensor, new_shape = test_data() + pipeline = TosaPipelineMI[input_t1]( + View(), + (test_tensor, new_shape), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", View.needs_transpose_tests) +def test_view_tosa_BI(test_data: Tuple): + test_tensor, new_shape = test_data() + pipeline = TosaPipelineBI[input_t1]( + View(), + (test_tensor, new_shape), + aten_op, + exir_op=[], + ) + pipeline.run() + + +@common.parametrize("test_data", View.needs_transpose_tests) +def 
test_view_u55_BI(test_data: Tuple): + test_tensor, new_shape = test_data() + pipeline = EthosU55PipelineBI[input_t1]( + View(), + (test_tensor, new_shape), + aten_op, + exir_ops=[], + ) + pipeline.run() + + +@common.parametrize("test_data", View.needs_transpose_tests) +def test_view_u85_BI(test_data: Tuple): + test_tensor, new_shape = test_data() + pipeline = EthosU85PipelineBI[input_t1]( + View(), + (test_tensor, new_shape), + aten_op, + exir_ops=[], + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_where.py b/backends/arm/test/ops/test_where.py index 91d616232fa..7bfd27ac0a8 100644 --- a/backends/arm/test/ops/test_where.py +++ b/backends/arm/test/ops/test_where.py @@ -5,15 +5,13 @@ from typing import List, Tuple -import pytest - import torch from executorch.backends.arm.quantizer import ( EthosUQuantizer, get_symmetric_quantization_config, - TOSAQuantizer, ) + from executorch.backends.arm.test import common from executorch.backends.arm.test.tester.test_pipeline import ( EthosU85PipelineBI, @@ -124,18 +122,18 @@ def scalar_condition(input: torch.Tensor): ) test_modules_common = { - "two_dim_tensor_cond": two_dim_tensor_cond, - "three_dim_tensor_cond": three_dim_tensor_cond, - "float32_tensor_cond": float32_tensor_cond, - "two_dim_scalar_cond": two_dim_scalar_cond, - "three_dim_scalar_cond": three_dim_scalar_cond, - "float32_scalar_cond": float32_scalar_cond, + "two_dim_tensor_cond": lambda: two_dim_tensor_cond, + "three_dim_tensor_cond": lambda: three_dim_tensor_cond, + "float32_tensor_cond": lambda: float32_tensor_cond, + "two_dim_scalar_cond": lambda: two_dim_scalar_cond, + "three_dim_scalar_cond": lambda: three_dim_scalar_cond, + "float32_scalar_cond": lambda: float32_scalar_cond, } test_modules_MI = { **test_modules_common, - "float32_tensor_cond_tuple_dtype": float32_tensor_cond_tuple_dtype, - "float32_tensor_cond_tuple_dtype_bool": float32_tensor_cond_tuple_dtype_bool, + "float32_tensor_cond_tuple_dtype": lambda: float32_tensor_cond_tuple_dtype, + "float32_tensor_cond_tuple_dtype_bool": lambda: float32_tensor_cond_tuple_dtype_bool, } test_modules_BI = { @@ -146,93 +144,51 @@ def scalar_condition(input: torch.Tensor): @common.parametrize("test_module", test_modules_MI) -def test_where_tosa_MI(test_module): +def test_where_self_tosa_MI(test_module): pipeline = TosaPipelineMI[input_t]( - test_module, test_module.get_inputs(), aten_op, exir_op + test_module(), + test_module().get_inputs(), + aten_op, + exir_op, ) pipeline.run() @common.parametrize("test_module", test_modules_BI) -def test_where_tosa_BI(test_module): - compile_spec = common.get_tosa_compile_spec("TOSA-0.80+BI") - quantizer = TOSAQuantizer(compile_spec).set_io(get_symmetric_quantization_config()) +def test_where_self_tosa_BI(test_module): pipeline = TosaPipelineBI[input_t]( - test_module, test_module.get_inputs(), aten_op, exir_op - ) - pipeline.change_args( - "quantize", Quantize(quantizer, get_symmetric_quantization_config()) + test_module(), + test_module().get_inputs(), + aten_op, + exir_op, + symmetric_io_quantization=True, ) pipeline.run() @common.parametrize("test_module", test_modules_BI) -def test_where_u55_BI(test_module): - compile_spec = common.get_u55_compile_spec() - quantizer = EthosUQuantizer(compile_spec).set_io( - get_symmetric_quantization_config() - ) - +@common.XfailIfNoCorstone300 +def test_where_self_u55_BI_not_delegated(test_module): # There will be one full_like op which will be delegated. 
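    # The where op itself is expected to stay un-delegated on U55: the pipeline
    # below checks for exactly one remaining edge where op (exir_op: 1) next to
    # the single delegate that holds the lowered full_like.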
num_delegates = 1 num_exir = 0 - pipeline = OpNotSupportedPipeline[input_t]( - test_module, - test_module.get_inputs(), - "TOSA-0.80+BI+u55", - { - exir_op: 1, - "executorch_exir_dialects_edge__ops_aten_full_default": num_exir, - }, - num_delegates, - ) - - pipeline.change_args( - "quantize", Quantize(quantizer, get_symmetric_quantization_config()) - ) - pipeline.run() - - -@common.parametrize("test_module", test_modules_BI) -def test_where_u85_BI(test_module): - compile_spec = common.get_u85_compile_spec() - quantizer = EthosUQuantizer(compile_spec).set_io( - get_symmetric_quantization_config() - ) - pipeline = EthosU85PipelineBI[input_t]( - test_module, test_module.get_inputs(), aten_op, exir_op, run_on_fvp=False - ) - pipeline.change_args( - "quantize", Quantize(quantizer, get_symmetric_quantization_config()) - ) - pipeline.run() - - -@common.parametrize("test_module", test_modules_BI) -@pytest.mark.skip(reason="The same as test_where_u55_BI") -@common.XfailIfNoCorstone300 -def test_where_u55_BI_on_fvp(test_module): compile_spec = common.get_u55_compile_spec() quantizer = EthosUQuantizer(compile_spec).set_io( get_symmetric_quantization_config() ) - # There will be one full_like op which will be delegated. - num_delegates = 1 - num_exir = 0 - pipeline = OpNotSupportedPipeline[input_t]( - test_module, - test_module.get_inputs(), - "TOSA-0.80+BI+u55", + test_module(), + test_module().get_inputs(), { exir_op: 1, "executorch_exir_dialects_edge__ops_aten_full_default": num_exir, }, num_delegates, + quantize=True, + u55_subset=True, ) - pipeline.change_args( "quantize", Quantize(quantizer, get_symmetric_quantization_config()) ) @@ -241,15 +197,14 @@ def test_where_u55_BI_on_fvp(test_module): @common.parametrize("test_module", test_modules_BI) @common.XfailIfNoCorstone320 -def test_where_u85_BI_on_fvp(test_module): - compile_spec = common.get_u85_compile_spec() - quantizer = EthosUQuantizer(compile_spec).set_io( - get_symmetric_quantization_config() - ) +def test_where_self_u85_BI(test_module): + pipeline = EthosU85PipelineBI[input_t]( - test_module, test_module.get_inputs(), aten_op, exir_op, run_on_fvp=True - ) - pipeline.change_args( - "quantize", Quantize(quantizer, get_symmetric_quantization_config()) + test_module(), + test_module().get_inputs(), + aten_op, + exir_op, + run_on_fvp=True, + symmetric_io_quantization=True, ) pipeline.run() diff --git a/backends/arm/test/tester/test_pipeline.py b/backends/arm/test/tester/test_pipeline.py index 38d82b739e1..c4c90064bce 100644 --- a/backends/arm/test/tester/test_pipeline.py +++ b/backends/arm/test/tester/test_pipeline.py @@ -13,8 +13,9 @@ get_symmetric_quantization_config, TOSAQuantizer, ) -from executorch.backends.arm.test import common +from executorch.backends.arm.test import common, conftest from executorch.backends.arm.test.tester.arm_tester import ArmTester, RunPasses +from executorch.backends.arm.tosa_specification import TosaSpecification from executorch.backends.xnnpack.test.tester.tester import Quantize from executorch.exir.backend.compile_spec_schema import CompileSpec @@ -281,8 +282,14 @@ def __init__( rtol: float = 1e-03, qtol: int = 0, ): + tosa_profiles = { + "0.80": TosaSpecification.create_from_string("TOSA-0.80+BI"), + "1.0": TosaSpecification.create_from_string("TOSA-1.0+INT"), + } + tosa_version = conftest.get_option("tosa_version") + compile_spec = common.get_tosa_compile_spec( - tosa_version, custom_path=custom_path + tosa_profiles[tosa_version], custom_path=custom_path ) quant_stage = ( Quantize( @@ -371,8 +378,14 @@ def 
__init__( rtol: float = 1e-03, qtol: int = 0, ): + tosa_profiles = { + "0.80": TosaSpecification.create_from_string("TOSA-0.80+MI"), + "1.0": TosaSpecification.create_from_string("TOSA-1.0+FP"), + } + tosa_version = conftest.get_option("tosa_version") + compile_spec = common.get_tosa_compile_spec( - tosa_version, custom_path=custom_path + tosa_profiles[tosa_version], custom_path=custom_path ) super().__init__( module, @@ -663,7 +676,6 @@ class TransformAnnotationPassPipeline(BasePipelineMaker, Generic[T]): Attributes: module: The module which the pipeline is applied to. test_data: Data used for testing the module. - tosa_version: The TOSA-version which to test for. custom_path : Path to dump intermediate artifacts such as tosa and pte to. @@ -673,11 +685,16 @@ def __init__( self, module: torch.nn.Module, test_data: T, - tosa_version: str, custom_path: str = None, ): + tosa_profiles = { + "0.80": TosaSpecification.create_from_string("TOSA-0.80+BI"), + "1.0": TosaSpecification.create_from_string("TOSA-1.0+INT"), + } + tosa_version = conftest.get_option("tosa_version") + compile_spec = common.get_tosa_compile_spec( - tosa_version, custom_path=custom_path + tosa_profiles[tosa_version], custom_path=custom_path ) super().__init__( module, @@ -723,11 +740,21 @@ def __init__( self, module: torch.nn.Module, test_data: T, - tosa_version: str, non_delegated_ops: Dict[str, int], n_expected_delegates: int = 0, custom_path: str = None, + quantize: Optional[bool] = False, + u55_subset: Optional[bool] = False, ): + tosa_profiles = { + "0.80": "TOSA-0.80+" + ("BI" if quantize else "MI"), + "1.0": "TOSA-1.0+" + ("INT" if quantize else "FP"), + } + tosa_version = tosa_profiles[conftest.get_option("tosa_version")] + + if u55_subset and quantize: + tosa_version = f"{tosa_version}+u55" + compile_spec = common.get_tosa_compile_spec( tosa_version, custom_path=custom_path ) @@ -739,7 +766,7 @@ def __init__( [], ) - if "BI" in tosa_version: + if "INT" in tosa_version or "BI" in tosa_version: self.add_stage(self.tester.quantize, pos=0) self.change_args("check_not.exir", [])
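Note (illustrative sketch, not part of the patch): the OpNotSupportedPipeline hunk above now derives its TOSA profile string from the session-wide tosa_version option combined with the new quantize and u55_subset flags, instead of taking an explicit tosa_version argument. A minimal standalone sketch of that resolution logic, using only names that appear in the hunk (resolve_profile itself is a hypothetical helper, not an executorch API):

    def resolve_profile(session_version: str, quantize: bool, u55_subset: bool) -> str:
        # Mirrors the mapping in OpNotSupportedPipeline.__init__ above.
        profiles = {
            "0.80": "TOSA-0.80+" + ("BI" if quantize else "MI"),
            "1.0": "TOSA-1.0+" + ("INT" if quantize else "FP"),
        }
        profile = profiles[session_version]
        # The u55 subset restriction only applies to the quantized (integer) profile.
        if u55_subset and quantize:
            profile = f"{profile}+u55"
        return profile

For example, resolve_profile("0.80", quantize=True, u55_subset=True) returns "TOSA-0.80+BI+u55", the same string the removed where-op tests passed to the pipeline explicitly.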