Commit bb7d8ce

skywall authored and robert-kalmar committed

NXP backend: Use default (non-shared) quantization params for HardTanh

1 parent 4197fc1 commit bb7d8ce

File tree

3 files changed: +48 −12 lines

backends/nxp/quantizer/patterns.py
backends/nxp/tests/executorch_pipeline.py
backends/nxp/tests/ir/converter/node_converter/test_hardtanh_converter.py

backends/nxp/quantizer/patterns.py

Lines changed: 32 additions & 2 deletions
@@ -279,7 +279,7 @@ def partition_types(self):
         return [torch.ops.aten.flatten.using_ints]
 
 
-class HardTanhPattern(SharedSpecPattern):
+class HardTanhPattern(QuantizationPattern):
     """
     Quantizer for HardTanh operator. Shared quantization spec is selected, as activation functions usually follows
     computation layer.
@@ -288,8 +288,23 @@ class HardTanhPattern(SharedSpecPattern):
     def partition_types(self):
         return [torch.ops.aten.hardtanh.default]
 
+    def get_anchors(
+        self, gm: fx.GraphModule, fused_partition: List[fx.GraphModule]
+    ) -> PartitionAnchors | None:
+        node = fused_partition[0].nodes[-1]
+
+        return PartitionAnchors(
+            inputs=[(node, 0)],
+            weights=[],
+            biases=[],
+            output=[(node,)],
+        )
+
+    def replacement_op(self):
+        raise AssertionError()
+
 
-class HardTanhInPlacePattern(SharedSpecPattern):
+class HardTanhInPlacePattern(QuantizationPattern):
     """
     Quantizer for HardTanh operator with param inplace=True. Shared quantization spec is selected, as activation
     functions usually follows computation layer.
@@ -298,6 +313,21 @@ class HardTanhInPlacePattern(SharedSpecPattern):
     def partition_types(self):
         return [torch.ops.aten.hardtanh_.default]
 
+    def get_anchors(
+        self, gm: fx.GraphModule, fused_partition: List[fx.GraphModule]
+    ) -> PartitionAnchors | None:
+        node = fused_partition[0].nodes[-1]
+
+        return PartitionAnchors(
+            inputs=[(node, 0)],
+            weights=[],
+            biases=[],
+            output=[(node,)],
+        )
+
+    def replacement_op(self):
+        raise AssertionError()
+
 
 class LinearPattern(QuantizationPattern):
     def partition_types(self) -> List[OpOverload]:
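For orientation, the sketch below (not part of the commit) illustrates the anchor layout the new get_anchors() produces: instead of sharing the producer's quantization spec, the hardtanh node's own input 0 and output are observed independently. The toy module, the node lookup, and the plain dict standing in for the backend's PartitionAnchors dataclass are all assumptions for illustration only.

import torch
from torch import fx


class ConvHardTanh(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)

    def forward(self, x):
        return torch.nn.functional.hardtanh(self.conv(x), min_val=-1.0, max_val=1.0)


gm = fx.symbolic_trace(ConvHardTanh())

# Locate the hardtanh call, mirroring `fused_partition[0].nodes[-1]` in the diff above.
hardtanh_node = next(
    n for n in gm.graph.nodes
    if n.op == "call_function" and "hardtanh" in str(n.target)
)

# Same anchor layout as the new get_anchors(): observe input 0 and the node's
# own output, with no weights or biases (dict used here in place of the real
# PartitionAnchors type from the NXP quantizer package).
anchors = {
    "inputs": [(hardtanh_node, 0)],
    "weights": [],
    "biases": [],
    "output": [(hardtanh_node,)],
}
print(anchors)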

backends/nxp/tests/executorch_pipeline.py

Lines changed: 10 additions & 6 deletions
@@ -31,6 +31,15 @@ def _quantize_model(model, calibration_inputs: list[tuple[torch.Tensor]]):
     return m
 
 
+def get_random_float_data(input_shapes: tuple[int] | list[tuple[int]]):
+    # TODO: Replace with something more robust.
+    return (
+        (torch.randn(input_shapes),)
+        if type(input_shapes) is tuple
+        else tuple(torch.randn(input_shape) for input_shape in input_shapes)
+    )
+
+
 def to_quantized_edge_program(
     model: torch.nn.Module,
     input_shapes: tuple[int] | list[tuple[int]],
@@ -43,12 +52,7 @@ def to_quantized_edge_program(
         "For multiple inputs, provide" " list[tuple[int]]."
     )
 
-    random_tensors = (
-        (torch.randn(input_shapes),)
-        if type(input_shapes) is tuple
-        else tuple(torch.randn(input_shape) for input_shape in input_shapes)
-    )
-    calibration_inputs = [random_tensors, random_tensors]
+    calibration_inputs = [get_random_float_data(input_shapes) for _ in range(4)]
     example_input = (
         (torch.ones(input_shapes),)
        if type(input_shapes) is tuple
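As a usage note, here is a small sketch of how the new helper behaves; the import path is assumed from the file's location in the repository and may differ locally.

from executorch.backends.nxp.tests.executorch_pipeline import get_random_float_data

# Single input: one shape tuple -> a 1-tuple with one random tensor.
single = get_random_float_data((1, 3, 16, 16))
assert len(single) == 1 and single[0].shape == (1, 3, 16, 16)

# Multiple inputs: a list of shape tuples -> one tensor per shape.
multi = get_random_float_data([(1, 3, 16, 16), (1, 10)])
assert len(multi) == 2

# Calibration now draws fresh random data for each of the 4 samples, instead of
# reusing the same tensors twice as before.
calibration_inputs = [get_random_float_data((1, 3, 16, 16)) for _ in range(4)]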

backends/nxp/tests/ir/converter/node_converter/test_hardtanh_converter.py

Lines changed: 6 additions & 4 deletions
@@ -39,7 +39,7 @@ def forward(self, x):
         return self.block(x)
 
 
-class CustomHardTanhBlock(torch.nn.Module):
+class ConvHardTanhBlock(torch.nn.Module):
     def __init__(
         self,
         conv_in_channels: int = 3,
@@ -89,16 +89,18 @@ def test_relu6_quant(mocker, input_shape: tuple[int], inplace: bool):
     )
 
 
-@pytest.mark.parametrize("input_shape", [(1, 3, 128, 128), (1, 3, 256, 256)])
+@pytest.mark.parametrize("input_shape", [(1, 3, 16, 16), (1, 3, 32, 32)])
 @pytest.mark.parametrize(
     "activation_range", list(HardTanhConverter.supported_modes_map.keys())
 )
 @pytest.mark.parametrize("inplace", [True, False])
 def test_custom_hardtanh_quant(
     mocker, input_shape: tuple[int], activation_range: tuple[int, int], inplace: bool
 ):
+    # TODO(13063): This test suffers from non-ideal testing random quantization, because we always use range <0,1>.
+    # We should update (decrease atol) when the Conv/Linear + Activation fuse at quantization is in place.
     min_val, max_val = activation_range
-    model = CustomHardTanhBlock(
+    model = ConvHardTanhBlock(
         conv_in_channels=input_shape[1],
         min_act_val=min_val,
         max_act_val=max_val,
@@ -122,5 +124,5 @@ def test_custom_hardtanh_quant(
         tflite_input_preprocess=ToNHWCPreprocess(),
         tflite_output_preprocess=ToNCHWPreprocess(),
         input_data=input_data,
-        atol=1.0,
+        atol=2.0,
     )
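A back-of-the-envelope sketch of the quantization step sizes involved, assuming plain affine int8 per-tensor quantization over the hardtanh output range (not necessarily the backend's exact scheme), which suggests the relaxed atol=2.0 mainly covers accumulated conv + requantization error from the <0,1> random calibration data (per the TODO above) rather than the hardtanh step size itself:

def qparams_for_range(min_val: float, max_val: float, qmin: int = -128, qmax: int = 127):
    # Affine per-tensor quantization parameters for a fixed activation range.
    scale = (max_val - min_val) / (qmax - qmin)
    zero_point = round(qmin - min_val / scale)
    return scale, zero_point

# e.g. a (-1, 1) range gives a step of ~0.0078 and (0, 6) a step of ~0.0235,
# both far below the test's atol of 2.0.
for rng in [(-1.0, 1.0), (0.0, 6.0)]:
    print(rng, qparams_for_range(*rng))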
