
Commit 0749b7d

jerryzh168 authored and liangel-02 committed
Remove group_size arg in Float8DynamicActivationInt4WeightConfig (#2779)
Summary:
Fixes: #2763

Test Plan:
python test/quantization/quantize_/workflows/int4/test_int4_preshuffled_tensor.py

Reviewers:

Subscribers:

Tasks:

Tags:
1 parent 6eedb83 commit 0749b7d
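
For context, a minimal usage sketch of the config after this change; the toy model and shapes are illustrative, and the import paths assume a recent torchao build with the fbgemm preshuffled kernels available:

import torch
from torchao.quantization import quantize_
from torchao.quantization.quant_api import Float8DynamicActivationInt4WeightConfig

# hypothetical toy model; the config applies to nn.Linear weights
model = torch.nn.Sequential(torch.nn.Linear(256, 256, dtype=torch.bfloat16))

# group_size is no longer a constructor argument; the transform now
# hardcodes 128, the only size the underlying kernel supports
quantize_(model, Float8DynamicActivationInt4WeightConfig(packing_format="preshuffled"))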

File tree

3 files changed: +5 -5 lines changed

test/quantization/quantize_/workflows/int4/test_int4_preshuffled_tensor.py

Lines changed: 1 addition & 1 deletion

@@ -33,8 +33,8 @@
     version=2,
 )
 
+# only 128 group_size is supported
 FP8_ACT_CONFIG = Float8DynamicActivationInt4WeightConfig(
-    group_size=128,
     packing_format="preshuffled",
 )
 
test/quantization/test_qat.py

Lines changed: 1 addition & 1 deletion

@@ -1927,7 +1927,7 @@ def test_quantize_api_fp8_int4(self):
         quantize_(model, QATConfig(Float8DynamicActivationInt4WeightConfig(), step="convert"))
         """
         self._test_quantize_api_against_ptq(
-            Float8DynamicActivationInt4WeightConfig(group_size=128),
+            Float8DynamicActivationInt4WeightConfig(),
             target_prepare_sqnr=15,
             target_convert_sqnr=float("inf"),
         )
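
A sketch of the prepare/convert QAT flow this test compares against PTQ; the QATConfig import path and the model are assumptions, not taken from this commit:

import torch
from torchao.quantization import quantize_
from torchao.quantization.qat import QATConfig
from torchao.quantization.quant_api import Float8DynamicActivationInt4WeightConfig

model = torch.nn.Sequential(torch.nn.Linear(256, 256, dtype=torch.bfloat16))
base_config = Float8DynamicActivationInt4WeightConfig()  # no group_size arg anymore

quantize_(model, QATConfig(base_config, step="prepare"))  # insert fake quantization
# ... fine-tune the model here ...
quantize_(model, QATConfig(base_config, step="convert"))  # swap in real quantized weights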

torchao/quantization/quant_api.py

Lines changed: 3 additions & 3 deletions

@@ -1156,13 +1156,13 @@ def _int4_weight_only_transform(
 class Float8DynamicActivationInt4WeightConfig(AOBaseConfig):
     """Configuration for apply float8 dynamic per row quantization and int4
     per group weight quantization to linear
+    (only group_size 128 is supported right now since underlying kernel used only supports 128
+    and above and no benefits of making it bigger)
 
     Args:
-        `group_size`: group size for groupwise quantization for weight
         `packing_format`: how the weight is packed, only preshuffled is supported
     """
 
-    group_size: int = 128
     packing_format: PackingFormat = "preshuffled"
 
 
@@ -1174,13 +1174,13 @@ def _float8_dynamic_activation_int4_weight_transform(
         "applying int8 weight only quant requires module to have weight attribute"
         + " but {module} does not have one"
     )
-    group_size = config.group_size
     packing_format = config.packing_format
 
     assert packing_format == "preshuffled", (
         f"only preshuffled packing_format supported right now, got: {packing_format}"
     )
     weight = module.weight
+    group_size = 128
     block_size = tuple([1 for _ in range(weight.ndim - 1)] + [group_size])
     new_weight = Int4PreshuffledTensor.from_hp(
         module.weight,
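
A quick illustration of the block_size the transform now derives from the fixed group size; plain Python mirroring the line in the hunk above:

group_size = 128
weight_ndim = 2  # an nn.Linear weight is 2-D: (out_features, in_features)
# one block per output row, 128 weights per quantization group along the last dim
block_size = tuple([1 for _ in range(weight_ndim - 1)] + [group_size])
assert block_size == (1, 128)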
