
Remove group_size arg in Float8DynamicActivationInt4WeightConfig #2779


Merged
merged 1 commit into from Aug 18, 2025
@@ -33,8 +33,8 @@
     version=2,
 )
 
+# only 128 group_size is supported
 FP8_ACT_CONFIG = Float8DynamicActivationInt4WeightConfig(
-    group_size=128,
     packing_format="preshuffled",
 )

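For reference, this is how the updated config would be constructed after this PR. A minimal sketch, assuming a CUDA build of torchao with the fbgemm preshuffled int4 kernels available; the import paths follow torchao's public API and the toy model is illustrative:

import torch
from torchao.quantization import quantize_
from torchao.quantization.quant_api import Float8DynamicActivationInt4WeightConfig

# Toy model; the preshuffled int4 kernel targets CUDA + bf16 linears.
model = torch.nn.Sequential(torch.nn.Linear(256, 512)).to(torch.bfloat16).cuda()

# group_size is no longer a constructor argument; the transform hardcodes 128.
config = Float8DynamicActivationInt4WeightConfig(packing_format="preshuffled")
quantize_(model, config)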
2 changes: 1 addition & 1 deletion test/quantization/test_qat.py
@@ -1927,7 +1927,7 @@ def test_quantize_api_fp8_int4(self):
         quantize_(model, QATConfig(Float8DynamicActivationInt4WeightConfig(), step="convert"))
         """
         self._test_quantize_api_against_ptq(
-            Float8DynamicActivationInt4WeightConfig(group_size=128),
+            Float8DynamicActivationInt4WeightConfig(),
             target_prepare_sqnr=15,
             target_convert_sqnr=float("inf"),
         )
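The test's target_prepare_sqnr / target_convert_sqnr thresholds compare the quantized model's output against a float reference, in dB. As a rough illustration of the metric (not the repo's exact helper, which lives in the test utilities):

import torch

def sqnr_db(reference: torch.Tensor, quantized: torch.Tensor) -> torch.Tensor:
    # Signal-to-quantization-noise ratio: 10 * log10(||x||^2 / ||x - x_q||^2).
    # target_convert_sqnr=float("inf") therefore asserts a bit-exact match.
    noise = reference - quantized
    return 10 * torch.log10(reference.pow(2).sum() / noise.pow(2).sum())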
6 changes: 3 additions & 3 deletions torchao/quantization/quant_api.py
@@ -1149,13 +1149,13 @@ def _int4_weight_only_transform(
 class Float8DynamicActivationInt4WeightConfig(AOBaseConfig):
     """Configuration for apply float8 dynamic per row quantization and int4
     per group weight quantization to linear
+    (only group_size 128 is supported right now since underlying kernel used only supports 128
+    and above and no benefits of making it bigger)
 
     Args:
-        `group_size`: group size for groupwise quantization for weight
         `packing_format`: how the weight is packed, only preshuffled is supported
     """
 
-    group_size: int = 128
     packing_format: PackingFormat = "preshuffled"


@@ -1167,13 +1167,13 @@ def _float8_dynamic_activation_int4_weight_transform(
         "applying int8 weight only quant requires module to have weight attribute"
         + " but {module} does not have one"
     )
-    group_size = config.group_size
     packing_format = config.packing_format
 
     assert packing_format == "preshuffled", (
         f"only preshuffled packing_format supported right now, got: {packing_format}"
     )
     weight = module.weight
+    group_size = 128
     block_size = tuple([1 for _ in range(weight.ndim - 1)] + [group_size])
     new_weight = Int4PreshuffledTensor.from_hp(
         module.weight,
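To make the hardcoded group size concrete: for an ordinary 2D linear weight, the block_size expression in the transform reduces to (1, 128), i.e. each quantization group spans 128 input channels within a single output row. A standalone check mirroring the expression in the diff above:

import torch

weight = torch.randn(512, 256)  # (out_features, in_features)
group_size = 128  # fixed by this PR; the underlying kernel only supports 128
block_size = tuple([1 for _ in range(weight.ndim - 1)] + [group_size])
assert block_size == (1, 128)  # one group of 128 input channels per output row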