
Commit 5b775fe

Update ruff version to 0.11.6 (#2103)
Summary: Use a more recent ruff version so that people who simply install the latest ruff run into fewer problems.

Test Plan: CI
1 parent 896f61b commit 5b775fe

File tree

120 files changed (+1047 −1025 lines)

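Nearly all of the churn below is mechanical reformatting: bumping ruff from 0.6.8 to 0.11.6 crosses the 2025 style guide introduced in ruff 0.9, which, among other changes, parenthesizes long assert messages and formats the expressions inside f-strings. A minimal sketch of the three recurring patterns (hypothetical example values, not repository code):

# Illustrative sketch, not code from this commit: the three rewrite patterns
# that ruff >= 0.9 (2025 style guide) applies throughout the diff below.
last_page = 3
idx, total = 0, 10
run = {"workflow": {"name": "lint"}}

# 1. Long assert messages: the message, not the condition, now gets the
#    parentheses when the line must wrap.
assert last_page > 0, (
    "Error reading header info to determine total number of pages of labels"
)

# 2. Expressions inside f-strings are formatted like ordinary code, e.g.
#    spaces are added around binary operators.
print(f"Skipping {idx + 1} of {total}")

# 3. Nested quotes in f-strings are normalized: the outer string keeps the
#    preferred double quotes and inner subscripts flip to single quotes.
print(f"{run['workflow']['name']} / ")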

.github/scripts/github_utils.py (+4 −4)

@@ -50,10 +50,10 @@ def gh_fetch_url_and_headers(
     ):
         print(
             f"""Rate limit exceeded:
-            Used: {err.headers['X-RateLimit-Used']}
-            Limit: {err.headers['X-RateLimit-Limit']}
-            Remaining: {err.headers['X-RateLimit-Remaining']}
-            Resets at: {err.headers['x-RateLimit-Reset']}"""
+            Used: {err.headers["X-RateLimit-Used"]}
+            Limit: {err.headers["X-RateLimit-Limit"]}
+            Remaining: {err.headers["X-RateLimit-Remaining"]}
+            Resets at: {err.headers["x-RateLimit-Reset"]}"""
         )
         raise

.github/scripts/label_utils.py (+3 −3)

@@ -62,9 +62,9 @@ def gh_get_labels(org: str, repo: str) -> List[str]:
         update_labels(labels, info)

     last_page = get_last_page_num_from_header(header)
-    assert (
-        last_page > 0
-    ), "Error reading header info to determine total number of pages of labels"
+    assert last_page > 0, (
+        "Error reading header info to determine total number of pages of labels"
+    )
     for page_number in range(2, last_page + 1):  # skip page 1
         _, info = request_for_labels(prefix + f"&page={page_number}")
         update_labels(labels, info)

.github/scripts/trymerge.py (+3 −3)

@@ -490,7 +490,7 @@ def get_check_run_name_prefix(workflow_run: Any) -> str:
     if workflow_run is None:
         return ""
     else:
-        return f'{workflow_run["workflow"]["name"]} / '
+        return f"{workflow_run['workflow']['name']} / "


 def is_passing_status(status: Optional[str]) -> bool:
@@ -538,7 +538,7 @@ def add_conclusions(edges: Any) -> None:
             if not isinstance(checkrun_node, dict):
                 warn(f"Expected dictionary, but got {type(checkrun_node)}")
                 continue
-            checkrun_name = f'{get_check_run_name_prefix(workflow_run)}{checkrun_node["name"]}'
+            checkrun_name = f"{get_check_run_name_prefix(workflow_run)}{checkrun_node['name']}"
             existing_checkrun = workflow_obj.jobs.get(checkrun_name)
             if existing_checkrun is None or not is_passing_status(
                 existing_checkrun.status
@@ -653,7 +653,7 @@ def skip_func(idx: int, candidate: "GitHubPR") -> bool:
         if not open_only or not candidate.is_closed():
             return False
         print(
-            f"Skipping {idx+1} of {len(rev_list)} PR (#{candidate.pr_num}) as its already been merged"
+            f"Skipping {idx + 1} of {len(rev_list)} PR (#{candidate.pr_num}) as its already been merged"
         )
         return True

.github/scripts/trymerge_explainer.py (+1 −1)

@@ -77,7 +77,7 @@ def get_merge_message(
         (
             "<details><summary>Advanced Debugging</summary>",
             "Check the merge workflow status ",
-            f"<a href=\"{os.getenv('GH_RUN_URL')}\">here</a>",
+            f'<a href="{os.getenv("GH_RUN_URL")}">here</a>',
             "</details>",
         )
     )

.github/workflows/ruff_linter.yml (+1 −1)

@@ -60,7 +60,7 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install ruff==0.6.8
+          pip install ruff==0.11.6

       - name: Regular lint check
         if: github.event_name != 'workflow_dispatch'
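To reproduce the reformatting locally against the new pin, installing the matching version and rerunning the formatter should suffice: pip install ruff==0.11.6, then ruff format . (and ruff check . for the lint rules) from the repository root.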

benchmarks/bench_galore_fused_kernels.py (+1 −1)

@@ -21,7 +21,7 @@ def run(args):

     benchmark = get_benchmark(M, N, dtype, allow_tf32=allow_tf32)
     save_path = (
-        f'benchmark_{M}x{N}_{rank}_{args.dtype}_{"tf32" if allow_tf32 else "no-tf32"}'
+        f"benchmark_{M}x{N}_{rank}_{args.dtype}_{'tf32' if allow_tf32 else 'no-tf32'}"
     )
     if not os.path.exists(save_path):
         os.makedirs(save_path)

benchmarks/benchmark_low_bit_adam.py (+6 −6)

@@ -185,12 +185,12 @@ def evaluate_model(model, args):
     if args.full_bf16:
         assert args.amp == "none", "When --full_bf16 is set, --amp must be none"
     if args.optim_cpu_offload == "deepspeed":
-        assert (
-            args.amp == "none"
-        ), "When using DeepSpeed ZeRO-Offload, --amp must be none"
-        assert (
-            args.optim == "AdamW"
-        ), "When using DeepSpeed ZeRO-Offload, --optim must be AdamW"
+        assert args.amp == "none", (
+            "When using DeepSpeed ZeRO-Offload, --amp must be none"
+        )
+        assert args.optim == "AdamW", (
+            "When using DeepSpeed ZeRO-Offload, --optim must be AdamW"
+        )
     if args.profile:
         args.n_epochs = 1
     if args.seed is not None:

benchmarks/benchmark_uintx.py (+1 −1)

@@ -109,7 +109,7 @@ def uintx_vs_fp16(nbits=[1, 2, 3, 4, 5, 6, 7], scales=[256, 512, 1024], repeats=
     for result in results:
         print(f"scale: {result[0]} fp16 time:{result[1]: .2f}ms speedups:")
         for i in range(2, len(result)):
-            print(f"int{nbits[i-2]}: {result[1]/result[i]: .2f}x")
+            print(f"int{nbits[i - 2]}: {result[1] / result[i]: .2f}x")


 if __name__ == "__main__":

benchmarks/float8/float8_roofline.py (+3 −3)

@@ -225,9 +225,9 @@ def run(
     * `enable_fusion_modeling`: if False uses Linear, if True uses LNLinearSigmoid and models the fusion of float8 overhead
     """

-    assert not (
-        (float8_recipe_name is not None) and (mx_recipe_name is not None)
-    ), "unsupported"
+    assert not ((float8_recipe_name is not None) and (mx_recipe_name is not None)), (
+        "unsupported"
+    )
     if float8_recipe_name is None and mx_recipe_name is None:
         float8_recipe_name = "tensorwise"

benchmarks/float8/profile_lowp_training.py (+12 −13)

@@ -299,22 +299,21 @@ def main(
         "lowp",
         "ref",
     ), "experiment_filter must be one of `both`, `lowp`, `ref`"
-    assert (
-        mode_filter
-        in (
-            "fwd_bwd",
-            "fwd",
-            "cast_only",
-            "cast_with_to_blocked",
-            "cast_only_dim0_dim1",
-        )
-    ), "mode_filter must be one of `fwd_bwd`, `fwd`, `cast_only`, `cast_with_to_blocked`, `cast_only_dim0_dim1`"
+    assert mode_filter in (
+        "fwd_bwd",
+        "fwd",
+        "cast_only",
+        "cast_with_to_blocked",
+        "cast_only_dim0_dim1",
+    ), (
+        "mode_filter must be one of `fwd_bwd`, `fwd`, `cast_only`, `cast_with_to_blocked`, `cast_only_dim0_dim1`"
+    )
     if mode_filter == "cast_only":
         assert experiment_filter == "lowp", "unsupported"

-    assert not (
-        float8_recipe_name is not None and mx_recipe_name is not None
-    ), "either float8_recipe_name or mx_recipe_name can be specified, but not both"
+    assert not (float8_recipe_name is not None and mx_recipe_name is not None), (
+        "either float8_recipe_name or mx_recipe_name can be specified, but not both"
+    )

     if float8_recipe_name is None and mx_recipe_name is None:
         config = Float8LinearConfig()

benchmarks/float8/utils.py (+18 −18)

@@ -75,9 +75,9 @@ def profiler_output_to_filtered_time_by_kernel_name(
             continue
         elif e.key == "aten::add_":
             # accumulating gradients into leaf tensors
-            assert e.count == (
-                num_iter * num_leaf_tensors
-            ), f"unexpected number of iter for {e.key}"
+            assert e.count == (num_iter * num_leaf_tensors), (
+                f"unexpected number of iter for {e.key}"
+            )
             continue
         elif e.key == "cudaDeviceSynchronize":
             continue
@@ -136,9 +136,9 @@ def get_name_to_shapes_iter(
     N: Optional[int],
 ):
     if shape_gen_name == "llama":
-        assert (
-            M == K == N == None
-        ), f"M, K, N arguments not supported for shape_gen_name {shape_gen_name}"
+        assert M == K == N == None, (
+            f"M, K, N arguments not supported for shape_gen_name {shape_gen_name}"
+        )
         bsz, seq_len = 4, 4096
         M = bsz * seq_len
         # LLaMa 2 70B single-node weight shapes
@@ -153,9 +153,9 @@ def get_name_to_shapes_iter(
         return name_to_shapes_70b.items()

     elif shape_gen_name == "pow2":
-        assert (
-            M == K == N == None
-        ), f"M, K, N arguments not supported for shape_gen_name {shape_gen_name}"
+        assert M == K == N == None, (
+            f"M, K, N arguments not supported for shape_gen_name {shape_gen_name}"
+        )
         name_to_shapes = {}
         min_power_of_2 = 10  # 1024
         max_power_of_2 = 14  # 16,384
@@ -165,9 +165,9 @@ def get_name_to_shapes_iter(
         return name_to_shapes.items()

     elif shape_gen_name == "pow2_extended":
-        assert (
-            M == K == N == None
-        ), f"M, K, N arguments not supported for shape_gen_name {shape_gen_name}"
+        assert M == K == N == None, (
+            f"M, K, N arguments not supported for shape_gen_name {shape_gen_name}"
+        )
         name_to_shapes = {}
         min_power_of_2 = 10  # 1024
         max_power_of_2 = 14  # 16,384
@@ -179,9 +179,9 @@ def get_name_to_shapes_iter(
         return name_to_shapes.items()

     elif shape_gen_name == "sweep":
-        assert (
-            M == K == N == None
-        ), f"M, K, N arguments not supported for shape_gen_name {shape_gen_name}"
+        assert M == K == N == None, (
+            f"M, K, N arguments not supported for shape_gen_name {shape_gen_name}"
+        )
         name_to_shapes = {}
         min_p2 = 8  # 256
         max_p2 = 15  # 32,768
@@ -197,9 +197,9 @@ def get_name_to_shapes_iter(
         return name_to_shapes.items()

     elif shape_gen_name == "custom":
-        assert (
-            M is not None and K is not None and N is not None
-        ), "M, K, N must be specified for custom shape_gen"
+        assert M is not None and K is not None and N is not None, (
+            "M, K, N must be specified for custom shape_gen"
+        )
         name_to_shapes = {
             1: (M, K, N),
         }

benchmarks/microbenchmarks/utils.py (+3 −3)

@@ -267,9 +267,9 @@ def string_to_config(
         group_size = int(_quant_args[2])
         return UIntXWeightOnlyConfig(dtype, group_size, use_hqq=use_hqq)
     elif "int8_dynamic_activation_intx_weight" in quantization:
-        assert (
-            high_precision_dtype == torch.float32
-        ), "int8_dynamic_activation_intx_weight requires using high_precision_dtype=torch.float32"
+        assert high_precision_dtype == torch.float32, (
+            "int8_dynamic_activation_intx_weight requires using high_precision_dtype=torch.float32"
+        )

         from torchao.dtypes import PackedLinearInt8DynamicActivationIntxWeightLayout
         from torchao.quantization.granularity import PerAxis, PerGroup

examples/sam2_amg_server/cli_on_modal.py (+1 −2)

@@ -372,8 +372,7 @@ def main(
     output_directory = Path(output_directory)
     if not (output_directory.exists() and output_directory.is_dir()):
         raise ValueError(
-            f"Expected output_directory {output_directory} "
-            "to be a directory and exist"
+            f"Expected output_directory {output_directory} to be a directory and exist"
         )

     if meta_paths is not None:

examples/sam2_amg_server/compile_export_utils.py (+6 −6)

@@ -158,13 +158,13 @@ def export_model(
         set_furious(mask_generator)
     assert task_type in TASK_TYPES, f"Expected {task_type} to be one of {TASK_TYPES}"
     if task_type in ["sps", "amg"]:
-        assert (
-            points_per_batch is not None
-        ), f"Specify points_per_batch for task {task_type}"
+        assert points_per_batch is not None, (
+            f"Specify points_per_batch for task {task_type}"
+        )
     if task_type == "sps":
-        assert (
-            points_per_batch == 1
-        ), f"Expected points_per_batch set to 1 for {task_type} but got {points_per_batch}"
+        assert points_per_batch == 1, (
+            f"Expected points_per_batch set to 1 for {task_type} but got {points_per_batch}"
+        )

     example_input = torch.empty(batch_size, 3, 1024, 1024)
     example_input = example_input.to(mask_generator.predictor._image_dtype)

examples/sam2_amg_server/server.py (+9 −5)

@@ -493,10 +493,12 @@ def main(
         set_furious(mask_generator)

     if save_fast != "":
-        assert (
-            load_fast == ""
-        ), "Can't save compiled models while loading them with --load-fast."
-        assert not baseline, "--fast cannot be combined with baseline. code to be torch.compile(fullgraph=True) compatible."
+        assert load_fast == "", (
+            "Can't save compiled models while loading them with --load-fast."
+        )
+        assert not baseline, (
+            "--fast cannot be combined with baseline. code to be torch.compile(fullgraph=True) compatible."
+        )
         print(f"Saving compiled models under directory {save_fast}")
         export_model(
             mask_generator,
@@ -508,7 +510,9 @@ def main(
         )

     if fast:
-        assert not baseline, "--fast cannot be combined with baseline. code to be torch.compile(fullgraph=True) compatible."
+        assert not baseline, (
+            "--fast cannot be combined with baseline. code to be torch.compile(fullgraph=True) compatible."
+        )
         set_fast(mask_generator, "amg", load_fast)

     # since autoquant is replicating what furious mode is doing, don't use these two together

examples/sam2_vos_example/video_profile.py (+2 −2)

@@ -113,7 +113,7 @@ def print_all_timings(self, warmup: int = 5):
         timestamped_print("Average timings for all sections:")
         for section_name in self.elapsed_times:
             average_time = self.get_average_time(section_name, warmup)
-            timestamped_print(f"{section_name}, {average_time*1000.0:.6f}")
+            timestamped_print(f"{section_name}, {average_time * 1000.0:.6f}")


 global_timer = CodeTimer()
@@ -186,7 +186,7 @@ def profiler_runner(path, fn, *args, **kwargs):
     if path is None:
         path = os.path.join(
             os.path.expanduser("~/traces"),
-            f'{datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}.json.gz',
+            f"{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.json.gz",
         )
     with torch.profiler.profile(
         activities=[

setup.py (+9 −9)

@@ -91,9 +91,9 @@ def __init__(self):
             default=(self._is_arm64() and self._is_macos()),
         )
         if self.build_cpu_aarch64:
-            assert (
-                self._is_arm64()
-            ), "TORCHAO_BUILD_CPU_AARCH64 requires an arm64 machine"
+            assert self._is_arm64(), (
+                "TORCHAO_BUILD_CPU_AARCH64 requires an arm64 machine"
+            )

         # TORCHAO_BUILD_KLEIDIAI is disabled by default for now because
         # 1) It increases the build time
@@ -102,9 +102,9 @@ def __init__(self):
             "TORCHAO_BUILD_KLEIDIAI", default=False
         )
         if self.build_kleidi_ai:
-            assert (
-                self.build_cpu_aarch64
-            ), "TORCHAO_BUILD_KLEIDIAI requires TORCHAO_BUILD_CPU_AARCH64 be set"
+            assert self.build_cpu_aarch64, (
+                "TORCHAO_BUILD_KLEIDIAI requires TORCHAO_BUILD_CPU_AARCH64 be set"
+            )

         # TORCHAO_BUILD_EXPERIMENTAL_MPS is disabled by default.
         self.build_experimental_mps = self._os_bool_var(
@@ -113,9 +113,9 @@ def __init__(self):
         if self.build_experimental_mps:
             assert self._is_macos(), "TORCHAO_BUILD_EXPERIMENTAL_MPS requires MacOS"
             assert self._is_arm64(), "TORCHAO_BUILD_EXPERIMENTAL_MPS requires arm64"
-            assert (
-                torch.mps.is_available()
-            ), "TORCHAO_BUILD_EXPERIMENTAL_MPS requires MPS be available"
+            assert torch.mps.is_available(), (
+                "TORCHAO_BUILD_EXPERIMENTAL_MPS requires MPS be available"
+            )

     def _is_arm64(self) -> bool:
         return platform.machine().startswith("arm64")

test/dtypes/test_affine_quantized_float.py (+6 −6)

@@ -138,9 +138,9 @@ def test_fp8_linear_variants(
         output_quantized = quantized_model(input_tensor)

         error = compute_error(output_original, output_quantized)
-        assert (
-            compute_error(output_original, output_quantized) > 20
-        ), f"Quantization error is too high got a SQNR of {error}"
+        assert compute_error(output_original, output_quantized) > 20, (
+            f"Quantization error is too high got a SQNR of {error}"
+        )

     @unittest.skipIf(
         not is_sm_at_least_89(), "Requires GPU with compute capability >= 8.9"
@@ -247,9 +247,9 @@ def test_serialization(self, mode: str):
                 )
             )

-            assert torch.allclose(
-                original_weight, new_weight
-            ), f"Weights do not match for {layer_name}"
+            assert torch.allclose(original_weight, new_weight), (
+                f"Weights do not match for {layer_name}"
+            )

         # Compare scales
         if hasattr(original_layer.weight, "scale"):
