From 2eef568bb37cb9afbb2f37bf781ac9652c94f733 Mon Sep 17 00:00:00 2001 From: Jorick van der Hoeven Date: Fri, 3 Apr 2026 11:32:12 -0700 Subject: [PATCH] Add torch.log10 INT support to ExecuTorch Arm backend (#18671) Summary: Adds quantized (INT) support for `torch.log10` in the ExecuTorch Arm backend using the lookup table (LUT) path. TOSA has no native LOG10 op, so for quantized inference the op is handled via a precomputed table in the `InsertTableOpsPass`. Changes: - Registered `log10.default` in the `unary_table_ops` dict in `insert_table_ops.py` for the LUT quantized path - Added `log10.default` to `_one_to_one` in `quantization_annotator.py` to enable quantization annotation - Added `log10.default` to `TOSA_PRO_INT_SupportList` in `tosa_profile_supported_op_lists.py` so the partitioner delegates it to the Arm backend - Wrote `test/ops/test_log10.py` with TOSA INT, U55 INT, U85 INT, and VGF quant test cases - Registered the test in `test/targets.bzl` Reviewed By: 3l1 Differential Revision: D99177468 --- backends/arm/_passes/insert_table_ops.py | 1 + .../tosa_profile_supported_op_lists.py | 1 + .../arm/quantizer/quantization_annotator.py | 1 + backends/arm/test/ops/test_log10.py | 87 +++++++++++++++++++ backends/arm/test/targets.bzl | 1 + 5 files changed, 91 insertions(+) create mode 100644 backends/arm/test/ops/test_log10.py diff --git a/backends/arm/_passes/insert_table_ops.py b/backends/arm/_passes/insert_table_ops.py index 78702bf9035..ffe16b36ffe 100644 --- a/backends/arm/_passes/insert_table_ops.py +++ b/backends/arm/_passes/insert_table_ops.py @@ -39,6 +39,7 @@ class TableOps: exir_ops.edge.aten.floor.default: torch.floor, exir_ops.edge.aten.log.default: torch.log, exir_ops.edge.aten.log1p.default: torch.log1p, + exir_ops.edge.aten.log10.default: torch.log10, exir_ops.edge.aten.reciprocal.default: torch.reciprocal, exir_ops.edge.aten.rsqrt.default: torch.rsqrt, exir_ops.edge.aten.sigmoid.default: torch.sigmoid, diff --git 
a/backends/arm/operator_support/tosa_profile_supported_op_lists.py b/backends/arm/operator_support/tosa_profile_supported_op_lists.py index 204c0bcf399..96c164214a0 100644 --- a/backends/arm/operator_support/tosa_profile_supported_op_lists.py +++ b/backends/arm/operator_support/tosa_profile_supported_op_lists.py @@ -56,6 +56,7 @@ exir_ops.edge.aten.expm1.default, exir_ops.edge.aten.log.default, exir_ops.edge.aten.log1p.default, + exir_ops.edge.aten.log10.default, exir_ops.edge.aten.linear.default, exir_ops.edge.aten.split_with_sizes_copy.default, exir_ops.edge.aten.split_copy.Tensor, diff --git a/backends/arm/quantizer/quantization_annotator.py b/backends/arm/quantizer/quantization_annotator.py index eb5f9bcbfef..efc8320f0b9 100644 --- a/backends/arm/quantizer/quantization_annotator.py +++ b/backends/arm/quantizer/quantization_annotator.py @@ -502,6 +502,7 @@ def _match_pattern( torch.ops.aten.sinh.default, torch.ops.aten.atan.default, torch.ops.aten.log1p.default, + torch.ops.aten.log10.default, torch.ops.aten.acosh.default, torch.ops.aten.sign.default, torch.ops.aten.asinh.default, diff --git a/backends/arm/test/ops/test_log10.py b/backends/arm/test/ops/test_log10.py new file mode 100644 index 00000000000..92491780f96 --- /dev/null +++ b/backends/arm/test/ops/test_log10.py @@ -0,0 +1,87 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# Copyright 2024-2026 Arm Limited and/or its affiliates. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ + +from typing import Tuple + +import torch +from executorch.backends.arm.test import common + +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineINT, + EthosU85PipelineINT, + TosaPipelineINT, + VgfPipeline, +) + +aten_op = "torch.ops.aten.log10.default" +exir_op = "executorch_exir_dialects_edge__ops_aten_log10_default" + +input_t1 = Tuple[torch.Tensor] + + +def _tensor(values): + return torch.tensor(values, dtype=torch.float32) + + +test_data_suite = { + # (test_name, test_data) + "tiny_positive": lambda: (_tensor([5e-4, 8e-4, 9e-4, 1e-3, 1.2e-3])), + "mixed_range": lambda: (_tensor([1e-4, 5e-4, 2e-3, 1e-2, 5e-2])), + "ones_rank4": lambda: (torch.ones(1, 10, 10, 10)), + "ones_rank3": lambda: (torch.ones(10, 10, 10)), + "rand": lambda: (torch.rand(10, 10) + 0.001), + "randn_pos": lambda: (torch.randn(10) + 10), + "randn_spread": lambda: (torch.max(torch.Tensor([0.1]), torch.randn(10) * 100)), + "ramp": lambda: (torch.arange(0.01, 20, 0.2)), +} + + +class Log10(torch.nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + return torch.log10(x) + + +@common.parametrize("test_data", test_data_suite) +def test_log10_tosa_INT(test_data: input_t1): + pipeline = TosaPipelineINT[input_t1](Log10(), (test_data(),), aten_op, exir_op) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone300 +def test_log10_u55_INT(test_data: input_t1): + EthosU55PipelineINT[input_t1]( + Log10(), + (test_data(),), + aten_op, + exir_op, + ).run() + + +@common.parametrize("test_data", test_data_suite) +@common.XfailIfNoCorstone320 +def test_log10_u85_INT(test_data: input_t1): + EthosU85PipelineINT[input_t1]( + Log10(), + (test_data(),), + aten_op, + exir_op, + ).run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_log10_vgf_quant(test_data: input_t1): + pipeline = VgfPipeline[input_t1]( + Log10(), + (test_data(),), + aten_op, + exir_op, + quantize=True, + ) 
+ pipeline.run() diff --git a/backends/arm/test/targets.bzl b/backends/arm/test/targets.bzl index 5c5f9dd02c3..88d31c964c7 100644 --- a/backends/arm/test/targets.bzl +++ b/backends/arm/test/targets.bzl @@ -20,6 +20,7 @@ def define_arm_tests(): "ops/test_cat.py", "ops/test_conv2d.py", "ops/test_linear.py", + "ops/test_log10.py", "ops/test_max_pool1d.py", "ops/test_mul.py", "ops/test_permute.py",