From a3e01beef11567f9b8123a08b1763baa7bb58704 Mon Sep 17 00:00:00 2001
From: Jerry Zhang
Date: Tue, 23 Dec 2025 19:47:51 -0800
Subject: [PATCH] Update remaining callsites to not use pt2e quant API from
 pytorch (#16380)

Summary:
X-link: https://github.com/facebookexternal/vizard/pull/18

X-link: https://github.com/meta-pytorch/tnt/pull/1041

X-link: https://github.com/pytorch/ao/pull/3535

We removed the pt2e quant code in D87958849; this updates some remaining
callsites to use torchao or executorch.

Reviewed By: jainapurva

Differential Revision: D89744472
---
 backends/nxp/quantizer/neutron_quantizer.py | 4 ++--
 backends/nxp/quantizer/utils.py             | 6 ++++--
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/backends/nxp/quantizer/neutron_quantizer.py b/backends/nxp/quantizer/neutron_quantizer.py
index b9186884d5e..1f1b6b88846 100644
--- a/backends/nxp/quantizer/neutron_quantizer.py
+++ b/backends/nxp/quantizer/neutron_quantizer.py
@@ -53,7 +53,6 @@
     no_outside_users,
 )
 from torch import fx
-from torch.ao.quantization.quantizer.utils import _annotate_output_qspec
 from torchao.quantization.pt2e import (
     FakeQuantize,
     FusedMovingAvgObsFakeQuantize,
@@ -62,6 +61,7 @@
     MovingAverageMinMaxObserver,
 )
 from torchao.quantization.pt2e.quantizer import (
+    annotate_output_qspec,
     ComposableQuantizer,
     DerivedQuantizationSpec,
     OperatorConfig,
@@ -338,7 +338,7 @@ def _annotate_inputs(self, model: fx.GraphModule):
                 continue
 
             if node.op == "placeholder" and len(node.users) > 0:
-                _annotate_output_qspec(node, act_qspec(self.is_qat))
+                annotate_output_qspec(node, act_qspec(self.is_qat))
                 self._mark_input_node_as_annotated(node)
 
     def validate(self, model: torch.fx.GraphModule) -> None:
diff --git a/backends/nxp/quantizer/utils.py b/backends/nxp/quantizer/utils.py
index 6dc58e8114a..459f31ec7da 100644
--- a/backends/nxp/quantizer/utils.py
+++ b/backends/nxp/quantizer/utils.py
@@ -15,13 +15,15 @@
 import torch
 from torch import fx
 from torch._ops import OpOverload
-from torch.ao.quantization import move_exported_model_to_eval
 from torch.export import ExportedProgram
 from torch.fx.passes.utils.source_matcher_utils import (
     check_subgraphs_connected,
     SourcePartition,
 )
-from torchao.quantization.pt2e import ObserverOrFakeQuantize
+from torchao.quantization.pt2e import (
+    move_exported_model_to_eval,
+    ObserverOrFakeQuantize,
+)
 from torchao.quantization.pt2e.quantize_pt2e import (
     convert_pt2e,
     prepare_pt2e,
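
A minimal sketch of the callsite migration applied above, assuming the torchao modules shown in the diff. The replacement import paths are taken directly from the diff; the commented prepare/convert flow is an illustrative assumption about typical pt2e usage, and `model`, `example_inputs`, and `quantizer` are hypothetical placeholders, not names from this patch.

# Old pt2e quant imports, removed from PyTorch core:
#     from torch.ao.quantization.quantizer.utils import _annotate_output_qspec
#     from torch.ao.quantization import move_exported_model_to_eval
#
# Replacement imports from torchao, as used in the diff above:
from torchao.quantization.pt2e import move_exported_model_to_eval
from torchao.quantization.pt2e.quantizer import annotate_output_qspec
from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e

# Illustrative flow (assumed, not taken from this patch):
#     exported = torch.export.export(model, example_inputs).module()
#     prepared = prepare_pt2e(exported, quantizer)
#     ... run calibration or QAT on `prepared` ...
#     prepared = move_exported_model_to_eval(prepared)
#     quantized = convert_pt2e(prepared)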