5 changes: 4 additions & 1 deletion backends/cadence/aot/compiler.py
@@ -22,6 +22,7 @@
     print_memory_planning_info,
 )
 from executorch.backends.cadence.aot.quantizer.fusion_pass import QuantFusion
+from executorch.exir.passes.spec_prop_pass import SpecPropPass
 from executorch.backends.cadence.aot.quantizer.quantizer import (
     CadenceDefaultQuantizer,
     CadenceQuantizer,
@@ -157,7 +158,9 @@ def apply_pre_edge_transform_passes(
     # Get patterns and apply fusion of dq -> op -> q to qop
     # pyre-ignore[16]: no attribute
     patterns = [q.pattern for q in quantizer.quantizers]
-    fused_program = _transform(converted_program, QuantFusion(patterns))
+    fused_program = _transform(
+        converted_program, QuantFusion(patterns), SpecPropPass()
+    )
 
     return fused_program
 
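For context on the compiler.py change: QuantFusion and SpecPropPass are both passed to _transform as graph-level passes and run in sequence over the converted program. As a rough sketch only (apply_passes is a hypothetical helper following the torch.fx PassResult convention, not the repo's actual _transform), a multi-pass pipeline can be expressed like this:

```python
# Illustrative sketch only: a generic helper that applies FX passes in order,
# assuming each pass follows the torch.fx PassBase convention of returning a
# PassResult whose graph_module carries the (possibly rewritten) graph.
from typing import Callable, Optional

import torch
from torch.fx.passes.infra.pass_base import PassResult


def apply_passes(
    gm: torch.fx.GraphModule,
    *passes: Callable[[torch.fx.GraphModule], Optional[PassResult]],
) -> torch.fx.GraphModule:
    for run_pass in passes:
        result = run_pass(gm)
        # A pass may mutate the module in place and return None,
        # or hand back a PassResult with a replacement graph module.
        if result is not None and result.graph_module is not None:
            gm = result.graph_module
    gm.graph.lint()
    gm.recompile()
    return gm
```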
14 changes: 13 additions & 1 deletion backends/cadence/aot/quantizer/patterns.py
@@ -625,7 +625,7 @@ def get_anchors(
         )
 
         cnn_weights = conv_layer.args[1]
-        if hasattr(cnn_weights.meta, "tensor_meta"):
+        if "tensor_meta" in cnn_weights.meta:
             cnn_weights_shape = cnn_weights.meta["tensor_meta"].shape
             # Bail if the channels are not multiple of 4 (SIMD)
             if cnn_weights_shape[0] % 4 != 0:
@@ -651,6 +651,18 @@ def get_anchors(
                     conv_layer,
                 )
 
+        inputs = conv_layer.args[0]
+        if "tensor_meta" in inputs.meta:
+            inputs_shape = inputs.meta["tensor_meta"].shape
+            # Bail if length != kernel size - Not yet supported
+            if inputs_shape[-1] != cnn_weights_shape[2]:
+                return (
+                    PartitionAnchors(
+                        empty=True,
+                    ),
+                    conv_layer,
+                )
+
         return (
             PartitionAnchors(
                 inputs=[],
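The patterns.py fix above hinges on how torch.fx stores shape information: Node.meta is a plain dict, so hasattr(node.meta, "tensor_meta") looks for an attribute on the dict object and is always False, whereas "tensor_meta" in node.meta tests for the key that shape propagation actually writes. A minimal standalone illustration (the Tiny module and ShapeProp usage are just an example, not code from this PR):

```python
# Minimal sketch: why `"tensor_meta" in node.meta` works and
# `hasattr(node.meta, "tensor_meta")` does not.
import torch
from torch.fx.passes.shape_prop import ShapeProp


class Tiny(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.relu(x)


gm = torch.fx.symbolic_trace(Tiny())
# Populate node.meta["tensor_meta"] by propagating a sample input's shapes.
ShapeProp(gm).propagate(torch.randn(2, 4, 8))

for node in gm.graph.nodes:
    # node.meta is a dict, so attribute lookup never finds the key.
    assert not hasattr(node.meta, "tensor_meta")
    if "tensor_meta" in node.meta:  # correct: dict key membership
        print(node.name, tuple(node.meta["tensor_meta"].shape))
```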