Skip to content

Commit 360da85

Browse files
author
GitHub Executorch
committed
Summary: ARM backend: Add skip markers for 16A8W quantization known test failures
Added skip functionality (because buck-based targets do not seem to comply with the xfail marker) to the test parametrize framework to handle known failing tests in ARM backend 16A8W quantization. The model_linear_rank4_zeros, model_linear_rank4_negative_ones, and model_linear_rank4_negative_large_rand test cases are now skipped due to bias quantization accuracy issues tracked in MLETORCH-1452. This prevents buck-based CI from blocking while maintaining visibility of the known issues through proper test annotations. Test Plan: Reviewers: Subscribers: Tasks: Tags:
1 parent a11d555 commit 360da85

File tree

2 files changed

+14
-4
lines changed

2 files changed

+14
-4
lines changed

backends/arm/test/common.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -237,6 +237,7 @@ def parametrize(
237237
arg_name: str,
238238
test_data: dict[str, Any],
239239
xfails: dict[str, xfail_type] | None = None,
240+
skips: dict[str, str] | None = None,
240241
strict: bool = True,
241242
flakies: dict[str, int] | None = None,
242243
) -> Decorator:
@@ -249,6 +250,8 @@ def parametrize(
249250
"""
250251
if xfails is None:
251252
xfails = {}
253+
if skips is None:
254+
skips = {}
252255
if flakies is None:
253256
flakies = {}
254257

@@ -259,6 +262,9 @@ def decorator_func(func: Callable[_P, _R]) -> Callable[_P, _R]:
259262
if id in flakies:
260263
# Mark this parameter as flaky with given reruns
261264
marker = (pytest.mark.flaky(reruns=flakies[id]),)
265+
elif id in skips:
266+
# fail markers do not work with 'buck' based ci, so use skip instead
267+
marker = (pytest.mark.skip(reason=skips[id]),)
262268
elif id in xfails:
263269
xfail_info = xfails[id]
264270
reason = ""

backends/arm/test/ops/test_linear.py

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -308,18 +308,22 @@ def test_linear_16a8w_tosa_INT(test_data: torch.Tensor):
308308

309309

310310
x_fails = {}
311+
x_skips = {}
312+
311313
for test_name in [
312314
"model_linear_rank4_zeros",
313315
"model_linear_rank4_negative_ones",
314316
"model_linear_rank4_negative_large_rand",
315317
]:
316318
for set_per_chan in ["True", "False"]:
317-
x_fails[test_name + ",per_channel_quant={}".format(set_per_chan)] = (
318-
"MLETORCH-1452: AssertionError: Output 0 does not match reference output."
319-
)
319+
key = test_name + ",per_channel_quant={}".format(set_per_chan)
320+
reason = "MLETORCH-1452: AssertionError: Output 0 does not match reference output."
321+
x_fails[key] = reason
322+
# TODO: Check why xfail doesn't work for this buck target. In the interim rely on skip
323+
x_skips[key] = reason
320324

321325

322-
@common.parametrize("test_data", test_data_all_16a8w, x_fails)
326+
@common.parametrize("test_data", test_data_all_16a8w, xfails=x_fails, skips=x_skips)
323327
@common.XfailIfNoCorstone300
324328
def test_linear_16a8w_u55_INT16(test_data: torch.Tensor):
325329
"""Test linear operation with 16A8W quantization on U55 (16-bit activations, 8-bit weights)"""

0 commit comments

Comments
 (0)