refactor: ✨ ruff rules enabled and fixed and code refactor made #1526

Merged · 2 commits · Sep 19, 2024
2 changes: 1 addition & 1 deletion demo.ipynb
@@ -1357,7 +1357,7 @@
}
],
"source": [
"IMAGE_NAME = list(ds.images.keys())[0]\n",
"IMAGE_NAME = next(iter(ds.images.keys()))\n",
"\n",
"image = ds.images[IMAGE_NAME]\n",
"annotations = ds.annotations[IMAGE_NAME]\n",
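The `list(ds.images.keys())[0]` → `next(iter(...))` rewrite matches Ruff's RUF015 (unnecessary iterable allocation for first element): the old form builds a list of every key only to read the first one, while `next(iter(...))` pulls a single element from the iterator. The same pattern is fixed in `supervision/detection/lmm.py` further down. A minimal sketch with a stand-in dict (not the notebook's real `ds.images`):

```python
# Stand-in mapping of image names to (hypothetical) image arrays.
images = {"frame_0001.png": "array-0", "frame_0002.png": "array-1"}

# Before: allocates a full list of keys only to index its first element.
first_old = list(images.keys())[0]

# After (RUF015): takes one element from the key iterator and stops.
first_new = next(iter(images.keys()))

assert first_old == first_new == "frame_0001.png"
```

Both forms fail on an empty mapping (IndexError vs. StopIteration), so behaviour for the non-empty case is unchanged.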
4 changes: 2 additions & 2 deletions examples/traffic_analysis/inference_example.py
@@ -1,6 +1,6 @@
import argparse
import os
from typing import Dict, Iterable, List, Set
from typing import Dict, Iterable, List, Optional, Set

import cv2
import numpy as np
@@ -77,7 +77,7 @@ def __init__(
roboflow_api_key: str,
model_id: str,
source_video_path: str,
target_video_path: str = None,
target_video_path: Optional[str] = None,
confidence_threshold: float = 0.3,
iou_threshold: float = 0.7,
) -> None:
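Importing `Optional` and annotating `target_video_path: Optional[str] = None` addresses Ruff's RUF013 (implicit Optional): under PEP 484, a parameter whose default is `None` must say so in its annotation rather than being annotated as plain `str`. The same fix recurs in `ultralytics_example.py`, `csv_sink.py`, `json_sink.py`, and `test_coco.py` below. An illustrative sketch (the function name is hypothetical, not from the diff):

```python
from typing import Optional


def process_video(source_video_path: str, target_video_path: Optional[str] = None) -> None:
    # In this sketch a None target means "display instead of write";
    # the explicit Optional[...] makes that contract visible to type checkers.
    if target_video_path is None:
        print(f"showing {source_video_path}")
    else:
        print(f"writing {source_video_path} -> {target_video_path}")
```

On Python 3.10+ the same thing can be spelled `str | None`; the `Optional[str]` form keeps the examples compatible with older interpreters.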
4 changes: 2 additions & 2 deletions examples/traffic_analysis/ultralytics_example.py
@@ -1,5 +1,5 @@
import argparse
from typing import Dict, Iterable, List, Set
from typing import Dict, Iterable, List, Optional, Set

import cv2
import numpy as np
@@ -74,7 +74,7 @@ def __init__(
self,
source_weights_path: str,
source_video_path: str,
target_video_path: str = None,
target_video_path: Optional[str] = None,
confidence_threshold: float = 0.3,
iou_threshold: float = 0.7,
) -> None:
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -166,7 +166,7 @@ indent-width = 4

[tool.ruff.lint]
# Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
select = ["E", "F", "I", "A", "Q", "W"]
select = ["E", "F", "I", "A", "Q", "W","RUF"]
ignore = []
# Allow autofix for all enabled rules (when `--fix`) is provided.
fixable = [
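Appending `"RUF"` to `select` turns on Ruff's own rule family alongside pycodestyle (`E`/`W`), Pyflakes (`F`), isort (`I`), flake8-builtins (`A`), and flake8-quotes (`Q`). The code changes in this PR line up with several of those rules; the snippet below is an editor's illustration (not part of the diff) of the three patterns most visible here, assuming the rule codes RUF005, RUF013, and RUF015:

```python
from typing import Dict, Optional

CLASSES = ["car", "truck"]


def summarize(detections: Dict[str, int], extra: Optional[list] = None) -> str:
    # RUF013: the Optional[...] above replaces an implicit `extra: list = None`.
    labels = [*CLASSES, "background"]   # RUF005: unpack instead of CLASSES + ["background"]
    first = next(iter(detections))      # RUF015: avoid list(detections.keys())[0]
    return f"{first} / {len(labels)} labels / extra={extra}"
```

Running `ruff check --fix .` after such a config change applies the autofixable subset of the newly enabled rules.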
8 changes: 4 additions & 4 deletions supervision/detection/core.py
@@ -249,7 +249,7 @@ def from_ultralytics(cls, ultralytics_results) -> Detections:
results = model(image)[0]
detections = sv.Detections.from_ultralytics(results)
```
""" # noqa: E501 // docs
"""

if hasattr(ultralytics_results, "obb") and ultralytics_results.obb is not None:
class_id = ultralytics_results.obb.cls.cpu().numpy().astype(int)
@@ -356,7 +356,7 @@ def from_tensorflow(
result = model(img)
detections = sv.Detections.from_tensorflow(result)
```
""" # noqa: E501 // docs
"""

boxes = tensorflow_results["detection_boxes"][0].numpy()
boxes[:, [0, 2]] *= resolution_wh[0]
@@ -431,7 +431,7 @@ def from_mmdetection(cls, mmdet_results) -> Detections:
result = inference_detector(model, image)
detections = sv.Detections.from_mmdetection(result)
```
""" # noqa: E501 // docs
"""

return cls(
xyxy=mmdet_results.pred_instances.bboxes.cpu().numpy(),
@@ -490,7 +490,7 @@ def from_transformers(
id2label=model.config.id2label
)
```
""" # noqa: E501 // docs
"""

if (
transformers_results.__class__.__name__ == "Tensor"
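The removed `# noqa: E501 // docs` markers suppressed the line-length rule for these docstrings; dropping them suggests the suppressions were no longer needed, and unused `noqa` directives are exactly what RUF100 (unused-noqa) reports once the RUF family is enabled — `ruff check --fix` can delete them automatically. The same cleanup repeats in `line_zone.py`, `csv_sink.py`, `json_sink.py`, `smoother.py`, and `keypoint/core.py` below. A small hypothetical illustration:

```python
# A hypothetical module-level constant with a leftover suppression.
MAX_SPEED = 120  # noqa: E501
# The line above is well under any reasonable length limit, so the `noqa`
# suppresses nothing; RUF100 flags it and `ruff check --fix` removes the
# stale comment.
```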
2 changes: 1 addition & 1 deletion supervision/detection/line_zone.py
@@ -55,7 +55,7 @@ class LineZone:
line_zone.in_count, line_zone.out_count
# 7, 2
```
""" # noqa: E501 // docs
"""

def __init__(
self,
2 changes: 1 addition & 1 deletion supervision/detection/lmm.py
@@ -113,7 +113,7 @@ def from_florence_2(
oriented bounding boxes.
"""
assert len(result) == 1, f"Expected result with a single element. Got: {result}"
task = list(result.keys())[0]
task = next(iter(result.keys()))
if task not in SUPPORTED_TASKS_FLORENCE_2:
raise ValueError(
f"{task} not supported. Supported tasks are: {SUPPORTED_TASKS_FLORENCE_2}"
2 changes: 1 addition & 1 deletion supervision/detection/overlap_filter.py
@@ -183,7 +183,7 @@ def group_overlapping_boxes(
ious = ious.flatten()

above_threshold = ious >= iou_threshold
merge_group = [idx] + np.flip(order[above_threshold]).tolist()
merge_group = [idx, *np.flip(order[above_threshold]).tolist()]
merge_groups.append(merge_group)
order = order[~above_threshold]
return merge_groups
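Rewriting `[idx] + np.flip(order[above_threshold]).tolist()` as a single list literal with unpacking follows RUF005 (collection-literal concatenation): the result is built in one pass instead of concatenating an intermediate list onto `[idx]`. The same rewrite shows up in `supervision/metrics/detection.py` further down. A runnable sketch with stand-in values (not the real NMS data):

```python
import numpy as np

idx = 4
order = np.array([9, 7, 5])                      # stand-in candidate indices
above_threshold = np.array([True, False, True])  # stand-in IoU mask

# Before: build a temporary list, then concatenate.
merge_group_old = [idx] + np.flip(order[above_threshold]).tolist()

# After (RUF005): one literal, the flipped selection unpacked in place.
merge_group_new = [idx, *np.flip(order[above_threshold]).tolist()]

assert merge_group_old == merge_group_new == [4, 5, 9]
```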
6 changes: 3 additions & 3 deletions supervision/detection/tools/csv_sink.py
@@ -48,7 +48,7 @@ class CSVSink:
detections = sv.Detections.from_ultralytics(result)
sink.append(detections, custom_data={'<CUSTOM_LABEL>':'<CUSTOM_DATA>'})
```
""" # noqa: E501 // docs
"""

def __init__(self, file_name: str = "output.csv") -> None:
"""
@@ -104,7 +104,7 @@ def close(self) -> None:

@staticmethod
def parse_detection_data(
detections: Detections, custom_data: Dict[str, Any] = None
detections: Detections, custom_data: Optional[Dict[str, Any]] = None
) -> List[Dict[str, Any]]:
parsed_rows = []
for i in range(len(detections.xyxy)):
@@ -137,7 +137,7 @@ def parse_detection_data(
return parsed_rows

def append(
self, detections: Detections, custom_data: Dict[str, Any] = None
self, detections: Detections, custom_data: Optional[Dict[str, Any]] = None
) -> None:
"""
Append detection data to the CSV file.
6 changes: 3 additions & 3 deletions supervision/detection/tools/json_sink.py
@@ -38,7 +38,7 @@ class JSONSink:
detections = sv.Detections.from_ultralytics(result)
sink.append(detections, custom_data={'<CUSTOM_LABEL>':'<CUSTOM_DATA>'})
```
""" # noqa: E501 // docs
"""

def __init__(self, file_name: str = "output.json") -> None:
"""
@@ -92,7 +92,7 @@ def write_and_close(self) -> None:

@staticmethod
def parse_detection_data(
detections: Detections, custom_data: Dict[str, Any] = None
detections: Detections, custom_data: Optional[Dict[str, Any]] = None
) -> List[Dict[str, Any]]:
parsed_rows = []
for i in range(len(detections.xyxy)):
@@ -126,7 +126,7 @@ def parse_detection_data(
return parsed_rows

def append(
self, detections: Detections, custom_data: Dict[str, Any] = None
self, detections: Detections, custom_data: Optional[Dict[str, Any]] = None
) -> None:
"""
Append detection data to the JSON file.
2 changes: 1 addition & 1 deletion supervision/detection/tools/smoother.py
@@ -53,7 +53,7 @@ class DetectionsSmoother:
annotated_frame = box_annotator.annotate(frame.copy(), detections)
sink.write_frame(annotated_frame)
```
""" # noqa: E501 // docs
"""

def __init__(self, length: int = 5) -> None:
"""
2 changes: 1 addition & 1 deletion supervision/keypoint/core.py
@@ -429,7 +429,7 @@ def from_yolo_nas(cls, yolo_nas_results) -> KeyPoints:
results = model.predict(image, conf=0.1)
key_points = sv.KeyPoints.from_yolo_nas(results)
```
""" # noqa: E501 // docs
"""
if len(yolo_nas_results.prediction.poses) == 0:
return cls.empty()

21 changes: 11 additions & 10 deletions supervision/keypoint/skeletons.py
@@ -1,11 +1,11 @@
from enum import Enum
from typing import Dict, List, Tuple
from typing import Dict, Tuple

Edges = List[Tuple[int, int]]
Edges = Tuple[Tuple[int, int], ...]


class Skeleton(Enum):
COCO = [
COCO: Edges = (
(1, 2),
(1, 3),
(2, 3),
@@ -23,9 +23,9 @@ class Skeleton(Enum):
(15, 13),
(16, 14),
(17, 15),
]
)

GHUM = [
GHUM: Edges = (
(1, 2),
(1, 5),
(2, 3),
@@ -61,9 +61,9 @@ class Skeleton(Enum):
(29, 33),
(30, 32),
(31, 33),
]
)

FACEMESH_TESSELATION_NO_IRIS = [
FACEMESH_TESSELATION_NO_IRIS: Edges = (
(128, 35),
(35, 140),
(140, 128),
@@ -2620,9 +2620,9 @@ class Skeleton(Enum):
(340, 449),
(449, 256),
(256, 340),
]
)

FACEMESH_TESSELATION = [
FACEMESH_TESSELATION: Edges = (
(474, 474),
(475, 476),
(476, 477),
@@ -2633,7 +2633,8 @@
(471, 472),
(472, 473),
(473, 470),
] + FACEMESH_TESSELATION_NO_IRIS
*FACEMESH_TESSELATION_NO_IRIS,
)


SKELETONS_BY_EDGE_COUNT: Dict[int, Edges] = {}
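In `skeletons.py` the `Edges` alias becomes an immutable `Tuple[Tuple[int, int], ...]`, the edge tables are defined as tuples, and the combined `FACEMESH_TESSELATION` value is composed by unpacking the iris-free table into the tuple literal instead of concatenating two lists. A reduced sketch of the pattern at module level (edge values abbreviated, not the real skeleton data):

```python
from typing import Tuple

Edges = Tuple[Tuple[int, int], ...]

# Abbreviated stand-ins for the full edge tables in skeletons.py.
FACEMESH_TESSELATION_NO_IRIS: Edges = ((128, 35), (35, 140), (140, 128))

# Before: two lists joined with `+`.
# After: one immutable tuple, the shared table unpacked in place.
FACEMESH_TESSELATION: Edges = ((474, 474), (475, 476), *FACEMESH_TESSELATION_NO_IRIS)

assert len(FACEMESH_TESSELATION) == 5
```

Tuples keep these lookup tables hashable and guard against accidental in-place mutation, which suits constants that also serve as `Enum` member values.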
4 changes: 2 additions & 2 deletions supervision/metrics/detection.py
@@ -440,8 +440,8 @@ def plot(
class_names = classes if classes is not None else self.classes
use_labels_for_ticks = class_names is not None and (0 < len(class_names) < 99)
if use_labels_for_ticks:
x_tick_labels = class_names + ["FN"]
y_tick_labels = class_names + ["FP"]
x_tick_labels = [*class_names, "FN"]
y_tick_labels = [*class_names, "FP"]
num_ticks = len(x_tick_labels)
else:
x_tick_labels = None
4 changes: 2 additions & 2 deletions test/dataset/formats/test_coco.py
@@ -1,5 +1,5 @@
from contextlib import ExitStack as DoesNotRaise
from typing import Dict, List, Tuple, Union
from typing import Dict, List, Optional, Tuple, Union

import numpy as np
import pytest
@@ -21,7 +21,7 @@ def mock_coco_annotation(
category_id: int = 0,
bbox: Tuple[float, float, float, float] = (0.0, 0.0, 0.0, 0.0),
area: float = 0.0,
segmentation: Union[List[list], Dict] = None,
segmentation: Optional[Union[List[list], Dict]] = None,
iscrowd: bool = False,
) -> dict:
if not segmentation:
Expand Down