 )
 from app.dao.services_dao import dao_fetch_service_by_id
 from app.dao.templates_dao import dao_get_template_by_id
-from app.enums import JobStatus
+from app.enums import JobStatus, NotificationStatus
 from app.errors import InvalidRequest, register_errors
 from app.schemas import (
     JobSchema,
@@ -62,6 +62,40 @@ def get_job_by_service_and_job_id(service_id, job_id):
     return jsonify(data=data)
 
 
+@job_blueprint.route("/<job_id>/status", methods=["GET"])
+def get_job_status(service_id, job_id):
+    """Fast job status endpoint for real-time polling. No S3 calls, no caching."""
+    check_suspicious_id(service_id, job_id)
+
+    job = dao_get_job_by_service_id_and_job_id(service_id, job_id)
+    statistics = dao_get_notification_outcomes_for_job(service_id, job_id)
+
+    delivered_statuses = (NotificationStatus.DELIVERED, NotificationStatus.SENT)
+    failed_statuses = (NotificationStatus.FAILED,) + NotificationStatus.failed_types()
+
+    delivered_count = failed_count = 0
+    for stat in statistics:
+        if stat.status in delivered_statuses:
+            delivered_count += stat.count
+        elif stat.status in failed_statuses:
+            failed_count += stat.count
+
+    total_count = job.notification_count or 0
+    pending_calculated = max(0, total_count - delivered_count - failed_count)
+
+    is_finished = job.processing_finished is not None and pending_calculated == 0
+
+    response_data = {
+        "total": total_count,
+        "delivered": delivered_count,
+        "failed": failed_count,
+        "pending": pending_calculated,
+        "finished": is_finished,
+    }
+
+    return jsonify(response_data)
+
+
 @job_blueprint.route("/<job_id>/cancel", methods=["POST"])
 def cancel_job(service_id, job_id):
     check_suspicious_id(service_id, job_id)
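
For context, a caller consumes this endpoint by polling until `finished` flips to true. A minimal sketch follows, assuming the blueprint is mounted under `/service/<service_id>/job` and that requests carry a bearer token; the base URL, IDs, token handling, and 2-second interval are illustrative assumptions, not part of this change:

```python
import time

import requests

# Hypothetical values for illustration only.
BASE_URL = "http://localhost:6011"
SERVICE_ID = "3f5aa9cc-0000-0000-0000-000000000000"
JOB_ID = "91c1f5f4-0000-0000-0000-000000000000"


def poll_job_status(token, interval_seconds=2):
    """Poll the fast status endpoint until the job reports finished."""
    url = f"{BASE_URL}/service/{SERVICE_ID}/job/{JOB_ID}/status"
    while True:
        response = requests.get(url, headers={"Authorization": f"Bearer {token}"})
        response.raise_for_status()
        status = response.json()
        print(
            f"delivered={status['delivered']} failed={status['failed']} "
            f"pending={status['pending']} / total={status['total']}"
        )
        if status["finished"]:
            return status
        time.sleep(interval_seconds)
```

Short polling intervals should be tolerable here precisely because the handler avoids S3 and caching layers and only issues two DAO queries.
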
@@ -263,9 +297,6 @@ def create_job(service_id):
     original_file_name = data.get("original_file_name")
     data.update({"service": service_id})
     try:
-        current_app.logger.info(
-            f"#notify-debug-s3-partitioning DATA IN CREATE_JOB: {data}"
-        )
         data.update(**get_job_metadata_from_s3(service_id, data["id"]))
     except KeyError:
         raise InvalidRequest(
|
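A regression test for the new endpoint could follow the shape below. The `client` and `sample_job` fixtures and the `create_admin_authorization_header` helper are assumptions about the existing test suite, named here only for illustration:

```python
# A sketch only: `client`, `sample_job`, and the admin-auth helper are
# assumed fixtures/helpers from the existing test suite.
def test_get_job_status_returns_counts(client, sample_job):
    response = client.get(
        f"/service/{sample_job.service_id}/job/{sample_job.id}/status",
        headers=[create_admin_authorization_header()],  # assumed helper
    )
    assert response.status_code == 200
    body = response.get_json()
    assert set(body) == {"total", "delivered", "failed", "pending", "finished"}
    # pending is derived and never negative, per the handler's max(0, ...) logic
    assert body["pending"] == max(0, body["total"] - body["delivered"] - body["failed"])
```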