Skip to content

Commit cb3235b

Browse files
committed
chore: isolate session detail UI changes
1 parent c0132bd commit cb3235b

12 files changed

Lines changed: 11 additions & 186 deletions

File tree

ami/jobs/tests/test_tasks.py

Lines changed: 1 addition & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,7 @@
1717
from ami.jobs.models import Job, JobDispatchMode, JobState, MLJob
1818
from ami.jobs.tasks import process_nats_pipeline_result
1919
from ami.main.models import Detection, Project, SourceImage, SourceImageCollection
20-
from ami.ml.models import Algorithm, Pipeline
21-
from ami.ml.models.algorithm import AlgorithmTaskType
20+
from ami.ml.models import Pipeline
2221
from ami.ml.orchestration.async_job_state import AsyncJobStateManager
2322
from ami.ml.schemas import PipelineResultsError, PipelineResultsResponse, SourceImageResponse
2423
from ami.users.models import User
@@ -181,15 +180,6 @@ def test_process_nats_pipeline_result_mixed_results(self, mock_manager_class):
181180
"""
182181
mock_manager = self._setup_mock_nats(mock_manager_class)
183182

184-
# Create detection algorithm for the pipeline
185-
detection_algorithm = Algorithm.objects.create(
186-
name="test-detector",
187-
key="test-detector",
188-
task_type=AlgorithmTaskType.LOCALIZATION,
189-
)
190-
# Update pipeline to include detection algorithm
191-
self.pipeline.algorithms.add(detection_algorithm)
192-
193183
# For this test, we just want to verify progress tracking works with mixed results
194184
# We'll skip checking final job completion status since that depends on all stages
195185

ami/main/admin.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -265,7 +265,6 @@ class SourceImageAdmin(AdminBase):
265265
"checksum",
266266
"checksum_algorithm",
267267
"created_at",
268-
"get_was_processed",
269268
)
270269

271270
list_filter = (

ami/main/api/serializers.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1246,7 +1246,6 @@ class Meta:
12461246
"source_images",
12471247
"source_images_count",
12481248
"source_images_with_detections_count",
1249-
"source_images_processed_count",
12501249
"occurrences_count",
12511250
"taxa_count",
12521251
"description",
@@ -1548,7 +1547,6 @@ class EventTimelineIntervalSerializer(serializers.Serializer):
15481547
captures_count = serializers.IntegerField()
15491548
detections_count = serializers.IntegerField()
15501549
detections_avg = serializers.IntegerField()
1551-
was_processed = serializers.BooleanField()
15521550

15531551

15541552
class EventTimelineMetaSerializer(serializers.Serializer):

ami/main/api/views.py

Lines changed: 4 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,6 @@
3636
from ami.utils.storages import ConnectionTestResult
3737

3838
from ..models import (
39-
NULL_DETECTIONS_FILTER,
4039
Classification,
4140
Deployment,
4241
Detection,
@@ -379,7 +378,7 @@ def timeline(self, request, pk=None):
379378
)
380379
resolution = datetime.timedelta(minutes=resolution_minutes)
381380

382-
qs = SourceImage.objects.filter(event=event).with_was_processed() # type: ignore
381+
qs = SourceImage.objects.filter(event=event)
383382

384383
# Bulk update all source images where detections_count is null
385384
update_detection_counts(qs=qs, null_only=True)
@@ -405,7 +404,7 @@ def timeline(self, request, pk=None):
405404
source_images = list(
406405
qs.filter(timestamp__range=(start_time, end_time))
407406
.order_by("timestamp")
408-
.values("id", "timestamp", "detections_count", "was_processed")
407+
.values("id", "timestamp", "detections_count")
409408
)
410409

411410
timeline = []
@@ -422,7 +421,6 @@ def timeline(self, request, pk=None):
422421
"captures_count": 0,
423422
"detections_count": 0,
424423
"detection_counts": [],
425-
"was_processed": False,
426424
}
427425

428426
while image_index < len(source_images) and source_images[image_index]["timestamp"] <= interval_end:
@@ -434,9 +432,6 @@ def timeline(self, request, pk=None):
434432
interval_data["detection_counts"] += [image["detections_count"]]
435433
if image["detections_count"] >= max(interval_data["detection_counts"]):
436434
interval_data["top_capture"] = SourceImage(pk=image["id"])
437-
# Track if any image in this interval was processed
438-
if image["was_processed"]:
439-
interval_data["was_processed"] = True
440435
image_index += 1
441436

442437
# Set a meaningful average detection count to display for the interval
@@ -607,7 +602,7 @@ def prefetch_detections(self, queryset: QuerySet, project: Project | None = None
607602
score = get_default_classification_threshold(project, self.request)
608603

609604
prefetch_queryset = (
610-
Detection.objects.exclude(NULL_DETECTIONS_FILTER)
605+
Detection.objects.all()
611606
.annotate(
612607
determination_score=models.Max("occurrence__detections__classifications__score"),
613608
# Store whether this occurrence should be included based on default filters
@@ -714,7 +709,6 @@ class SourceImageCollectionViewSet(DefaultViewSet, ProjectMixin):
714709
SourceImageCollection.objects.all()
715710
.with_source_images_count() # type: ignore
716711
.with_source_images_with_detections_count()
717-
.with_source_images_processed_count()
718712
.prefetch_related("jobs")
719713
)
720714
serializer_class = SourceImageCollectionSerializer
@@ -730,7 +724,6 @@ class SourceImageCollectionViewSet(DefaultViewSet, ProjectMixin):
730724
"method",
731725
"source_images_count",
732726
"source_images_with_detections_count",
733-
"source_images_processed_count",
734727
"occurrences_count",
735728
]
736729

@@ -905,7 +898,7 @@ class DetectionViewSet(DefaultViewSet, ProjectMixin):
905898
API endpoint that allows detections to be viewed or edited.
906899
"""
907900

908-
queryset = Detection.objects.exclude(NULL_DETECTIONS_FILTER).select_related("source_image", "detection_algorithm")
901+
queryset = Detection.objects.all().select_related("source_image", "detection_algorithm")
909902
serializer_class = DetectionSerializer
910903
filterset_fields = ["source_image", "detection_algorithm", "source_image__project"]
911904
ordering_fields = ["created_at", "updated_at", "detection_score", "timestamp"]

ami/main/models.py

Lines changed: 3 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -85,8 +85,6 @@ class TaxonRank(OrderedEnum):
8585
]
8686
)
8787

88-
NULL_DETECTIONS_FILTER = Q(bbox__isnull=True) | Q(bbox=[])
89-
9088

9189
def get_media_url(path: str) -> str:
9290
"""
@@ -1777,19 +1775,6 @@ def with_taxa_count(self, project: Project | None = None, request=None):
17771775
taxa_count=Coalesce(models.Subquery(taxa_subquery, output_field=models.IntegerField()), 0)
17781776
)
17791777

1780-
def with_was_processed(self):
1781-
"""
1782-
Annotate each SourceImage with a boolean `was_processed` indicating
1783-
whether any detections exist for that image.
1784-
1785-
This mirrors `SourceImage.get_was_processed()` but as a queryset
1786-
annotation for efficient bulk queries.
1787-
"""
1788-
# @TODO: this returns a was_processed status for any algorithm. Once the session detail view supports
1789-
# filtering by algorithm, this should be updated to return was_processed for the selected algorithm.
1790-
processed_exists = models.Exists(Detection.objects.filter(source_image_id=models.OuterRef("pk")))
1791-
return self.annotate(was_processed=processed_exists)
1792-
17931778

17941779
class SourceImageManager(models.Manager.from_queryset(SourceImageQuerySet)):
17951780
pass
@@ -1889,15 +1874,7 @@ def size_display(self) -> str:
18891874
return filesizeformat(self.size)
18901875

18911876
def get_detections_count(self) -> int:
1892-
# Detections count excludes detections without bounding boxes
1893-
# Detections with null bounding boxes are valid and indicates the image was successfully processed
1894-
return self.detections.exclude(NULL_DETECTIONS_FILTER).count()
1895-
1896-
def get_was_processed(self, algorithm_key: str | None = None) -> bool:
1897-
if algorithm_key:
1898-
return self.detections.filter(detection_algorithm__key=algorithm_key).exists()
1899-
else:
1900-
return self.detections.exists()
1877+
return self.detections.distinct().count()
19011878

19021879
def get_base_url(self) -> str | None:
19031880
"""
@@ -2067,7 +2044,6 @@ def update_detection_counts(qs: models.QuerySet[SourceImage] | None = None, null
20672044

20682045
subquery = models.Subquery(
20692046
Detection.objects.filter(source_image_id=models.OuterRef("pk"))
2070-
.exclude(NULL_DETECTIONS_FILTER)
20712047
.values("source_image_id")
20722048
.annotate(count=models.Count("id"))
20732049
.values("count"),
@@ -2538,15 +2514,6 @@ def save(self, *args, **kwargs):
25382514
super().save(*args, **kwargs)
25392515

25402516

2541-
class DetectionQuerySet(BaseQuerySet):
2542-
def null_detections(self):
2543-
return self.filter(NULL_DETECTIONS_FILTER)
2544-
2545-
2546-
class DetectionManager(models.Manager.from_queryset(DetectionQuerySet)):
2547-
pass
2548-
2549-
25502517
@final
25512518
class Detection(BaseModel):
25522519
"""An object detected in an image"""
@@ -2615,8 +2582,6 @@ class Detection(BaseModel):
26152582
source_image_id: int
26162583
detection_algorithm_id: int
26172584

2618-
objects = DetectionManager()
2619-
26202585
def get_bbox(self):
26212586
if self.bbox:
26222587
return BoundingBox(
@@ -3787,18 +3752,7 @@ def with_source_images_count(self):
37873752
def with_source_images_with_detections_count(self):
37883753
return self.annotate(
37893754
source_images_with_detections_count=models.Count(
3790-
"images",
3791-
filter=(~models.Q(images__detections__bbox__isnull=True) & ~models.Q(images__detections__bbox=[])),
3792-
distinct=True,
3793-
)
3794-
)
3795-
3796-
def with_source_images_processed_count(self):
3797-
return self.annotate(
3798-
source_images_processed_count=models.Count(
3799-
"images",
3800-
filter=models.Q(images__detections__isnull=False),
3801-
distinct=True,
3755+
"images", filter=models.Q(images__detections__isnull=False), distinct=True
38023756
)
38033757
)
38043758

@@ -3909,10 +3863,7 @@ def source_images_count(self) -> int | None:
39093863

39103864
def source_images_with_detections_count(self) -> int | None:
39113865
# This should always be pre-populated using queryset annotations
3912-
return None
3913-
3914-
def source_images_processed_count(self) -> int | None:
3915-
# This should always be pre-populated using queryset annotations
3866+
# return self.images.filter(detections__isnull=False).count()
39163867
return None
39173868

39183869
def occurrences_count(self) -> int | None:

ami/ml/models/pipeline.py

Lines changed: 1 addition & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -84,9 +84,6 @@ def filter_processed_images(
8484
task_logger.debug(f"Image {image} needs processing: has no existing detections from pipeline's detector")
8585
# If there are no existing detections from this pipeline, send the image
8686
yield image
87-
elif existing_detections.null_detections().exists(): # type: ignore
88-
task_logger.debug(f"Image {image} has a null detection from pipeline {pipeline}, skipping! ")
89-
continue
9087
elif existing_detections.filter(classifications__isnull=True).exists():
9188
# Check if there are detections with no classifications
9289
task_logger.debug(
@@ -409,17 +406,13 @@ def get_or_create_detection(
409406
410407
:return: A tuple of the Detection object and a boolean indicating whether it was created
411408
"""
412-
if detection_resp.bbox is not None:
413-
serialized_bbox = list(detection_resp.bbox.dict().values())
414-
else:
415-
serialized_bbox = None
409+
serialized_bbox = list(detection_resp.bbox.dict().values())
416410
detection_repr = f"Detection {detection_resp.source_image_id} {serialized_bbox}"
417411

418412
assert str(detection_resp.source_image_id) == str(
419413
source_image.pk
420414
), f"Detection belongs to a different source image: {detection_repr}"
421415

422-
# When reprocessing, we don't care which detection algorithm created the existing detection
423416
existing_detection = Detection.objects.filter(
424417
source_image=source_image,
425418
bbox=serialized_bbox,
@@ -492,7 +485,6 @@ def create_detections(
492485

493486
existing_detections: list[Detection] = []
494487
new_detections: list[Detection] = []
495-
496488
for detection_resp in detections:
497489
source_image = source_image_map.get(detection_resp.source_image_id)
498490
if not source_image:
@@ -818,37 +810,6 @@ class PipelineSaveResults:
818810
total_time: float
819811

820812

821-
def create_null_detections_for_undetected_images(
822-
results: PipelineResultsResponse,
823-
detection_algorithm: Algorithm,
824-
logger: logging.Logger = logger,
825-
) -> list[DetectionResponse]:
826-
"""
827-
Create null DetectionResponse objects (empty bbox) for images that have no detections.
828-
829-
:param results: The PipelineResultsResponse from the processing service
830-
:param algorithms_known: Dictionary of algorithms keyed by algorithm key
831-
832-
:return: List of DetectionResponse objects with null bbox
833-
"""
834-
source_images_with_detections = {detection.source_image_id for detection in results.detections}
835-
null_detections_to_add = []
836-
detection_algorithm_reference = AlgorithmReference(name=detection_algorithm.name, key=detection_algorithm.key)
837-
838-
for source_img in results.source_images:
839-
if source_img.id not in source_images_with_detections:
840-
null_detections_to_add.append(
841-
DetectionResponse(
842-
source_image_id=source_img.id,
843-
bbox=None,
844-
algorithm=detection_algorithm_reference,
845-
timestamp=now(),
846-
)
847-
)
848-
849-
return null_detections_to_add
850-
851-
852813
@celery_app.task(soft_time_limit=60 * 4, time_limit=60 * 5)
853814
def save_results(
854815
results: PipelineResultsResponse | None = None,
@@ -896,13 +857,6 @@ def save_results(
896857
)
897858

898859
algorithms_known: dict[str, Algorithm] = {algo.key: algo for algo in pipeline.algorithms.all()}
899-
try:
900-
detection_algorithm = pipeline.algorithms.get(task_type__in=Algorithm.detection_task_types)
901-
except Algorithm.DoesNotExist:
902-
raise ValueError("Pipeline does not have a detection algorithm")
903-
except Algorithm.MultipleObjectsReturned:
904-
raise NotImplementedError("Multiple detection algorithms per pipeline are not supported")
905-
906860
job_logger.info(f"Algorithms registered for pipeline: \n{', '.join(algorithms_known.keys())}")
907861

908862
if results.algorithms:
@@ -912,15 +866,6 @@ def save_results(
912866
"Algorithms and category maps must be registered before processing, using /info endpoint."
913867
)
914868

915-
# Ensure all images have detections
916-
# if not, add a NULL detection (empty bbox) to the results
917-
null_detections = create_null_detections_for_undetected_images(
918-
results=results,
919-
detection_algorithm=detection_algorithm,
920-
logger=job_logger,
921-
)
922-
results.detections = results.detections + null_detections
923-
924869
detections = create_detections(
925870
detections=results.detections,
926871
algorithms_known=algorithms_known,

ami/ml/schemas.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -163,14 +163,14 @@ class Config:
163163

164164
class DetectionRequest(pydantic.BaseModel):
165165
source_image: SourceImageRequest # the 'original' image
166-
bbox: BoundingBox | None = None
166+
bbox: BoundingBox
167167
crop_image_url: str | None = None
168168
algorithm: AlgorithmReference
169169

170170

171171
class DetectionResponse(pydantic.BaseModel):
172172
source_image_id: str
173-
bbox: BoundingBox | None = None
173+
bbox: BoundingBox
174174
inference_time: float | None = None
175175
algorithm: AlgorithmReference
176176
timestamp: datetime.datetime

ami/ml/tests.py

Lines changed: 0 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -735,30 +735,6 @@ def test_project_pipeline_config(self):
735735
final_config = self.pipeline.get_config(self.project.pk)
736736
self.assertEqual(final_config["test_param"], "project_value")
737737

738-
def test_image_with_null_detection(self):
739-
"""
740-
Test saving results for a pipeline that returns null detections for some images.
741-
"""
742-
image = self.test_images[0]
743-
results = self.fake_pipeline_results([image], self.pipeline)
744-
745-
# Manually change the results for a single image to a list of empty detections
746-
results.detections = []
747-
748-
save_results(results)
749-
750-
image.save()
751-
self.assertEqual(image.get_detections_count(), 0) # detections_count should exclude null detections
752-
total_num_detections = image.detections.distinct().count()
753-
self.assertEqual(total_num_detections, 1)
754-
755-
was_processed = image.get_was_processed()
756-
self.assertEqual(was_processed, True)
757-
758-
# Also test filtering by algorithm
759-
was_processed = image.get_was_processed(algorithm_key="random-detector")
760-
self.assertEqual(was_processed, True)
761-
762738

763739
class TestAlgorithmCategoryMaps(TestCase):
764740
def setUp(self):

0 commit comments

Comments
 (0)