From 727b7307a12cd1a8b871b5a9556a9d889784b527 Mon Sep 17 00:00:00 2001
From: Marco Giordano
Date: Wed, 14 Jan 2026 15:04:24 -0800
Subject: [PATCH] Fix Conv1d w8a32 operator

Summary:
#### Summary

This diff fixes the Conv1d w8a32 operator by adding a transformation to the
`val` attribute of the `other_inputs[0].meta` dictionary. Specifically, the
`permute` operation is applied to the `original_val` tensor within the
`fake_mode` context, and the resulting `transposed_val` is assigned to
`transposed_inputs.meta["val"]`.

Reviewed By: mcremon-meta

Differential Revision: D89863750
---
 backends/cadence/aot/compiler.py           |  5 ++++-
 backends/cadence/aot/quantizer/patterns.py | 14 +++++++++++++-
 2 files changed, 17 insertions(+), 2 deletions(-)

diff --git a/backends/cadence/aot/compiler.py b/backends/cadence/aot/compiler.py
index 6acecffb67a..e26b5a7da5a 100644
--- a/backends/cadence/aot/compiler.py
+++ b/backends/cadence/aot/compiler.py
@@ -22,6 +22,7 @@
     print_memory_planning_info,
 )
 from executorch.backends.cadence.aot.quantizer.fusion_pass import QuantFusion
+from executorch.exir.passes.spec_prop_pass import SpecPropPass
 from executorch.backends.cadence.aot.quantizer.quantizer import (
     CadenceDefaultQuantizer,
     CadenceQuantizer,
@@ -157,7 +158,9 @@ def apply_pre_edge_transform_passes(
     # Get patterns and apply fusion of dq -> op -> q to qop
     # pyre-ignore[16]: no attribute
     patterns = [q.pattern for q in quantizer.quantizers]
-    fused_program = _transform(converted_program, QuantFusion(patterns))
+    fused_program = _transform(
+        converted_program, QuantFusion(patterns), SpecPropPass()
+    )
 
     return fused_program
 
diff --git a/backends/cadence/aot/quantizer/patterns.py b/backends/cadence/aot/quantizer/patterns.py
index 7a11541b601..987ecd0173f 100644
--- a/backends/cadence/aot/quantizer/patterns.py
+++ b/backends/cadence/aot/quantizer/patterns.py
@@ -625,7 +625,7 @@ def get_anchors(
                 )
 
         cnn_weights = conv_layer.args[1]
-        if hasattr(cnn_weights.meta, "tensor_meta"):
+        if "tensor_meta" in cnn_weights.meta:
             cnn_weights_shape = cnn_weights.meta["tensor_meta"].shape
             # Bail if the channels are not multiple of 4 (SIMD)
             if cnn_weights_shape[0] % 4 != 0:
@@ -651,6 +651,18 @@ def get_anchors(
                     conv_layer,
                 )
 
+        inputs = conv_layer.args[0]
+        if "tensor_meta" in inputs.meta:
+            inputs_shape = inputs.meta["tensor_meta"].shape
+            # Bail if length != kernel size - Not yet supported
+            if inputs_shape[-1] != cnn_weights_shape[2]:
+                return (
+                    PartitionAnchors(
+                        empty=True,
+                    ),
+                    conv_layer,
+                )
+
         return (
             PartitionAnchors(
                 inputs=[],
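
For context, here is a minimal, hypothetical sketch of the `meta["val"]` fix the summary describes; it is not the actual code from the Cadence fusion pass. The helper name `fix_transposed_input_val` and the `(0, 2, 1)` permute order are assumptions, while `other_inputs`, `original_val`, `fake_mode`, `transposed_val`, and `transposed_inputs` follow the names used in the summary:

```python
from typing import List

from torch._subclasses.fake_tensor import FakeTensor
from torch.fx import Node


def fix_transposed_input_val(
    other_inputs: List[Node], transposed_inputs: Node
) -> None:
    # Fake value of the original (untransposed) Conv1d activation.
    original_val = other_inputs[0].meta["val"]
    assert isinstance(original_val, FakeTensor)
    # Reuse the FakeTensorMode the rest of the graph was traced under.
    fake_mode = original_val.fake_mode
    with fake_mode:
        # Assumed NCL -> NLC transpose, matching the permute inserted by the pass.
        transposed_val = original_val.permute(0, 2, 1)
    # Attach the recomputed fake value so later passes see the permuted
    # shape and strides on the new node.
    transposed_inputs.meta["val"] = transposed_val
```

Recomputing `val` under the same `FakeTensorMode` keeps the shape and stride metadata of the inserted permute node consistent, which is presumably why `SpecPropPass` is added to the `_transform` call in `compiler.py` above.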