From 4c3a6d9db06a2548c46cb8d975b1691925c3e775 Mon Sep 17 00:00:00 2001 From: Jonathan Edey Date: Fri, 11 Apr 2025 11:07:46 -0400 Subject: [PATCH 1/4] Refactored retry config to `_retry.py` and added support for backoff and Retry-After --- firebase_admin/_retry.py | 246 ++++++++++++++++++++++++++++++++++++ firebase_admin/messaging.py | 51 ++------ 2 files changed, 255 insertions(+), 42 deletions(-) create mode 100644 firebase_admin/_retry.py diff --git a/firebase_admin/_retry.py b/firebase_admin/_retry.py new file mode 100644 index 00000000..b1083727 --- /dev/null +++ b/firebase_admin/_retry.py @@ -0,0 +1,246 @@ +# Copyright 2025 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Internal retry logic module + +This module provides utilities for adding retry logic to HTTPX requests +""" + +from __future__ import annotations +import copy +import email.utils +import random +import re +import time +from types import CoroutineType +from typing import Any, Callable, List, Optional, Tuple +import logging +import asyncio +import httpx + +logger = logging.getLogger(__name__) + + +class HttpxRetry: + """HTTPX based retry config""" + # TODO: Decide + # urllib3.Retry ignores the status_forcelist and only respects Retry-After header + # for 413, 429 and 503 errors. + # Should we do the same? + # Default status codes to be used for ``status_forcelist`` + RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503]) + + #: Default maximum backoff time. 
+ DEFAULT_BACKOFF_MAX = 120 + + def __init__( + self, + status: int = 10, + status_forcelist: Optional[List[int]] = None, + backoff_factor: float = 0, + backoff_max: float = DEFAULT_BACKOFF_MAX, + raise_on_status: bool = False, + backoff_jitter: float = 0, + history: Optional[List[Tuple[ + httpx.Request, + Optional[httpx.Response], + Optional[Exception] + ]]] = None, + respect_retry_after_header: bool = False, + ) -> None: + self.status = status + self.status_forcelist = status_forcelist + self.backoff_factor = backoff_factor + self.backoff_max = backoff_max + self.raise_on_status = raise_on_status + self.backoff_jitter = backoff_jitter + if history: + self.history = history + else: + self.history = [] + self.respect_retry_after_header = respect_retry_after_header + + def copy(self) -> HttpxRetry: + """Creates a deep copy of this instance.""" + return copy.deepcopy(self) + + def is_retryable_response(self, response: httpx.Response) -> bool: + """Determine if a response implies that the request should be retried if possible.""" + if self.status_forcelist and response.status_code in self.status_forcelist: + return True + + has_retry_after = bool(response.headers.get("Retry-After")) + if ( + self.respect_retry_after_header + and has_retry_after + and response.status_code in self.RETRY_AFTER_STATUS_CODES + ): + return True + + return False + + # Placeholder for exception retrying + def is_retryable_error(self, error: Exception): + """Determine if the error implies that the request should be retired if possible.""" + logger.debug(error) + return False + + def is_exhausted(self) -> bool: + """Determine if there are anymore more retires.""" + # status count is negative + return self.status < 0 + + # Identical implementation of `urllib3.Retry.parse_retry_after()` + def _parse_retry_after(self, retry_after_header: str) -> float | None: + """Parses Retry-After string into a float with unit seconds.""" + seconds: float + # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4 + if re.match(r"^\s*[0-9]+\s*$", retry_after_header): + seconds = int(retry_after_header) + else: + retry_date_tuple = email.utils.parsedate_tz(retry_after_header) + if retry_date_tuple is None: + # TODO: Verify if this is the appropriate way to handle this. 
+ raise httpx.RemoteProtocolError(f"Invalid Retry-After header: {retry_after_header}") + + retry_date = email.utils.mktime_tz(retry_date_tuple) + seconds = retry_date - time.time() + + seconds = max(seconds, 0) + + return seconds + + def get_retry_after(self, response: httpx.Response) -> float | None: + """Determine the Retry-After time needed before sending the next request.""" + retry_after_header = response.headers.get('Retry_After', None) + if retry_after_header: + # Convert retry header to a float in seconds + return self._parse_retry_after(retry_after_header) + return None + + def get_backoff_time(self): + """Determine the backoff time needed before sending the next request.""" + # request_count is the number of previous request attempts + request_count = len(self.history) + backoff = self.backoff_factor * (2 ** (request_count-1)) + if self.backoff_jitter: + backoff += random.random() * self.backoff_jitter + return float(max(0, min(self.backoff_max, backoff))) + + async def sleep_for_backoff(self) -> None: + """Determine and wait the backoff time needed before sending the next request.""" + backoff = self.get_backoff_time() + logger.debug('Sleeping for %f seconds following failed request', backoff) + await asyncio.sleep(backoff) + + async def sleep(self, response: httpx.Response) -> None: + """Determine and wait the time needed before sending the next request.""" + if self.respect_retry_after_header: + retry_after = self.get_retry_after(response) + if retry_after: + await asyncio.sleep(retry_after) + return + await self.sleep_for_backoff() + + def increment( + self, + request: httpx.Request, + response: Optional[httpx.Response] = None, + error: Optional[Exception] = None + ) -> None: + """Update the retry state based on request attempt.""" + if response and self.is_retryable_response(response): + self.status -= 1 + self.history.append((request, response, error)) + + +# TODO: Remove comments +# Note - This implementation currently covers: +# - basic retires for pre-defined status errors +# - applying retry backoff and backoff jitter +# - ability to respect a response's retry-after header +class HttpxRetryTransport(httpx.AsyncBaseTransport): + """HTTPX transport with retry logic.""" + + # DEFAULT_RETRY = HttpxRetry( + # connect=1, read=1, status=4, status_forcelist=[500, 503], + # raise_on_status=False, backoff_factor=0.5, allowed_methods=None + # ) + DEFAULT_RETRY = HttpxRetry(status=4, status_forcelist=[500, 503], backoff_factor=0.5) + + # We could also support passing kwargs here + def __init__(self, retry: HttpxRetry = DEFAULT_RETRY, **kwargs) -> None: + self._retry = retry + + transport_kwargs = kwargs.copy() + transport_kwargs.update({'retries': 0, 'http2': True}) + # We should use a full AsyncHTTPTransport under the hood since that is + # fully implemented. We could consider making this class extend a + # AsyncHTTPTransport instead and use the parent class's methods to handle + # requests. We sould also ensure that that transport's internal retry is + # not enabled. 
+ self._wrapped_transport = httpx.AsyncHTTPTransport(**transport_kwargs) + + async def handle_async_request(self, request: httpx.Request) -> httpx.Response: + return await self._dispatch_with_retry( + request, self._wrapped_transport.handle_async_request) + + # Two types of retries + # - Status code (500s, redirect) + # - Error code (read, connect, other) + async def _dispatch_with_retry( + self, + request: httpx.Request, + dispatch_method: Callable[[httpx.Request], CoroutineType[Any, Any, httpx.Response]] + ) -> httpx.Response: + """Sends a request with retry logic using a provided dispatch method.""" + # This request config is used across all requests that use this transport and therefore + # needs to be copied to be used for just this request ans it's retries. + retry = self._retry.copy() + # First request + response, error = None, None + + while not retry.is_exhausted(): + + # First retry + if response: + await retry.sleep(response) + + # Need to reset here so only last attempt's error or response is saved. + response, error = None, None + + try: + logger.debug('Sending request in _dispatch_with_retry(): %r', request) + response = await dispatch_method(request) + logger.debug('Received response: %r', response) + except httpx.HTTPError as err: + logger.debug('Received error: %r', err) + error = err + + if response and not retry.is_retryable_response(response): + return response + + if error and not retry.is_retryable_error(error): + raise error + + retry.increment(request, response) + + if response: + return response + if error: + raise error + raise Exception('_dispatch_with_retry() ended with no response or exception') + + async def aclose(self) -> None: + await self._wrapped_transport.aclose() diff --git a/firebase_admin/messaging.py b/firebase_admin/messaging.py index abac5ae5..e886b25c 100644 --- a/firebase_admin/messaging.py +++ b/firebase_admin/messaging.py @@ -20,6 +20,7 @@ import json import warnings import asyncio +import logging import requests import httpx @@ -38,7 +39,9 @@ exceptions, App ) +from firebase_admin._retry import HttpxRetryTransport +logger = logging.getLogger(__name__) _MESSAGING_ATTRIBUTE = '_messaging' @@ -410,6 +413,9 @@ def auth_flow(self, request: httpx.Request): # copy original headers request.headers = _original_headers.copy() # mutates request headers + logger.debug( + 'Refreshing credentials for request attempt %d', + _credential_refresh_attempt + 1) self.apply_auth_headers(request) # Continue to perform the request @@ -420,6 +426,9 @@ def auth_flow(self, request: httpx.Request): # on refreshable status codes. Current transport.requests.AuthorizedSession() # only does this on 401 errors. We should do the same. 
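# [Editor's sketch, not part of the patch] The credential-refresh loop above uses
# httpx's generator-based custom auth protocol. A minimal standalone example of
# that pattern, with a hypothetical `fetch_token` callable standing in for the
# Google credential refresh performed by `apply_auth_headers()`:
import httpx

class TokenRefreshAuth(httpx.Auth):
    def __init__(self, fetch_token):
        # `fetch_token` is a hypothetical helper that returns a bearer token.
        self._fetch_token = fetch_token

    def auth_flow(self, request: httpx.Request):
        for _attempt in range(2):  # initial attempt plus one refresh-and-retry
            request.headers['Authorization'] = f'Bearer {self._fetch_token()}'
            # httpx sends the request and hands the response back to this generator.
            response = yield request
            if response.status_code != 401:
                break  # authorized, or failed for a reason a token refresh cannot fix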
if response.status_code in self._refresh_status_codes: + logger.debug( + 'Request attempt %d failed due to unauthorized credentials', + _credential_refresh_attempt + 1) _credential_refresh_attempt += 1 else: break @@ -715,45 +724,3 @@ def _build_fcm_error(cls, error_dict) -> Optional[Callable[..., exceptions.Fireb fcm_code = detail.get('errorCode') break return _MessagingService.FCM_ERROR_TYPES.get(fcm_code) if fcm_code else None - - -# TODO: Remove comments -# Notes: -# This implementation currently only covers basic retires for pre-defined status errors -class HttpxRetryTransport(httpx.AsyncBaseTransport): - """HTTPX transport with retry logic.""" - # We could also support passing kwargs here - def __init__(self, **kwargs) -> None: - # Hardcoded settings for now - self._retryable_status_codes = (500, 503,) - self._max_retry_count = 4 - - # - We use a full AsyncHTTPTransport under the hood to make use of it's - # fully implemented `handle_async_request()`. - # - We could consider making the `HttpxRetryTransport`` class extend a - # `AsyncHTTPTransport` instead and use the parent class's methods to handle - # requests. - # - We should also ensure that that transport's internal retry is - # not enabled. - transport_kwargs = kwargs.copy() - transport_kwargs.update({'retries': 0, 'http2': True}) - self._wrapped_transport = httpx.AsyncHTTPTransport(**transport_kwargs) - - - async def handle_async_request(self, request: httpx.Request) -> httpx.Response: - _retry_count = 0 - - while True: - # Dispatch request - # Let exceptions pass through for now - response = await self._wrapped_transport.handle_async_request(request) - - # Check if request is retryable - if response.status_code in self._retryable_status_codes: - _retry_count += 1 - - # Return if retries exhausted - if _retry_count > self._max_retry_count: - return response - else: - return response From 1c4c844dee04f32f91d6c3f676dc43ad1fdd3551 Mon Sep 17 00:00:00 2001 From: Jonathan Edey Date: Mon, 14 Apr 2025 17:42:12 -0400 Subject: [PATCH 2/4] Added unit tests for `_retry.py` --- firebase_admin/_retry.py | 21 +- tests/test_retry.py | 451 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 465 insertions(+), 7 deletions(-) create mode 100644 tests/test_retry.py diff --git a/firebase_admin/_retry.py b/firebase_admin/_retry.py index b1083727..79c8c8f8 100644 --- a/firebase_admin/_retry.py +++ b/firebase_admin/_retry.py @@ -35,8 +35,8 @@ class HttpxRetry: """HTTPX based retry config""" # TODO: Decide - # urllib3.Retry ignores the status_forcelist and only respects Retry-After header - # for 413, 429 and 503 errors. + # urllib3.Retry ignores the status_forcelist when respecting Retry-After header + # Only 413, 429 and 503 errors are retried with the Retry-After header. # Should we do the same? 
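# [Editor's sketch, not part of the patch] For the TODO above, this is the
# urllib3 behaviour being compared against, expressed as an equivalent config
# (assumes a urllib3 version that supports `respect_retry_after_header`):
from urllib3.util.retry import Retry

urllib3_retry = Retry(
    status=4,                          # retry budget for bad status codes
    status_forcelist=[500, 503],       # statuses retried with backoff
    backoff_factor=0.5,                # exponential backoff base
    respect_retry_after_header=True,   # honour Retry-After where the server sends it
)
# Statuses urllib3 treats as retryable purely on the strength of a Retry-After header:
print(Retry.RETRY_AFTER_STATUS_CODES)  # frozenset({413, 429, 503})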
# Default status codes to be used for ``status_forcelist`` RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503]) @@ -123,7 +123,7 @@ def _parse_retry_after(self, retry_after_header: str) -> float | None: def get_retry_after(self, response: httpx.Response) -> float | None: """Determine the Retry-After time needed before sending the next request.""" - retry_after_header = response.headers.get('Retry_After', None) + retry_after_header = response.headers.get('Retry-After', None) if retry_after_header: # Convert retry header to a float in seconds return self._parse_retry_after(retry_after_header) @@ -131,9 +131,12 @@ def get_retry_after(self, response: httpx.Response) -> float | None: def get_backoff_time(self): """Determine the backoff time needed before sending the next request.""" - # request_count is the number of previous request attempts - request_count = len(self.history) - backoff = self.backoff_factor * (2 ** (request_count-1)) + # attempt_count is the number of previous request attempts + attempt_count = len(self.history) + # Backoff should be set to 0 until after first retry. + if attempt_count <= 1: + return 0 + backoff = self.backoff_factor * (2 ** (attempt_count-1)) if self.backoff_jitter: backoff += random.random() * self.backoff_jitter return float(max(0, min(self.backoff_max, backoff))) @@ -141,7 +144,7 @@ def get_backoff_time(self): async def sleep_for_backoff(self) -> None: """Determine and wait the backoff time needed before sending the next request.""" backoff = self.get_backoff_time() - logger.debug('Sleeping for %f seconds following failed request', backoff) + logger.debug('Sleeping for backoff of %f seconds following failed request', backoff) await asyncio.sleep(backoff) async def sleep(self, response: httpx.Response) -> None: @@ -149,6 +152,10 @@ async def sleep(self, response: httpx.Response) -> None: if self.respect_retry_after_header: retry_after = self.get_retry_after(response) if retry_after: + logger.debug( + 'Sleeping for Retry-After header of %f seconds following failed request', + retry_after + ) await asyncio.sleep(retry_after) return await self.sleep_for_backoff() diff --git a/tests/test_retry.py b/tests/test_retry.py new file mode 100644 index 00000000..b29b5fef --- /dev/null +++ b/tests/test_retry.py @@ -0,0 +1,451 @@ +# Copyright 2025 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
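# [Editor's sketch, not part of the patch] Sanity check for the backoff change
# above: with the `attempt_count <= 1` guard, the first retry sleeps 0s and the
# schedule then grows as backoff_factor * 2**(attempt_count - 1), capped at
# backoff_max. Standalone mirror of get_backoff_time() for backoff_factor=0.5:
BACKOFF_FACTOR, BACKOFF_MAX = 0.5, 10.0

def expected_backoff(attempt_count: int) -> float:
    # attempt_count == len(history) == completed request attempts so far
    if attempt_count <= 1:
        return 0.0
    return min(BACKOFF_MAX, BACKOFF_FACTOR * (2 ** (attempt_count - 1)))

print([expected_backoff(n) for n in range(1, 7)])  # [0.0, 1.0, 2.0, 4.0, 8.0, 10.0]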
+ +"""Test cases for the firebase_admin._retry module.""" + +import time +import email.utils +from itertools import repeat +from unittest.mock import call +import pytest +import httpx +from pytest_mock import MockerFixture +import respx + +from firebase_admin._retry import HttpxRetry, HttpxRetryTransport + +_TEST_URL = 'http://firebase.test.url/' + +@pytest.fixture +def base_url() -> str: + """Provides a consistent base URL for tests.""" + return _TEST_URL + +class TestHttpxRetryTransport(): + @pytest.mark.asyncio + @respx.mock + async def test_no_retry_on_success(self, base_url: str, mocker: MockerFixture): + """Test that a successful response doesn't trigger retries.""" + retry_config = HttpxRetry(status=3, status_forcelist=[500]) + transport = HttpxRetryTransport(retry=retry_config) + client = httpx.AsyncClient(transport=transport) + + route = respx.post(base_url).mock(return_value=httpx.Response(200, text="Success")) + + mock_sleep = mocker.patch('asyncio.sleep', return_value=None) + response = await client.post(base_url) + + assert response.status_code == 200 + assert response.text == "Success" + assert route.call_count == 1 + mock_sleep.assert_not_called() + + @pytest.mark.asyncio + @respx.mock + async def test_no_retry_on_non_retryable_status(self, base_url: str, mocker: MockerFixture): + """Test that a non-retryable error status doesn't trigger retries.""" + retry_config = HttpxRetry(status=3, status_forcelist=[500, 503]) + transport = HttpxRetryTransport(retry=retry_config) + client = httpx.AsyncClient(transport=transport) + + route = respx.post(base_url).mock(return_value=httpx.Response(404, text="Not Found")) + + mock_sleep = mocker.patch('asyncio.sleep', return_value=None) + response = await client.post(base_url) + + assert response.status_code == 404 + assert response.text == "Not Found" + assert route.call_count == 1 + mock_sleep.assert_not_called() + + @pytest.mark.asyncio + @respx.mock + async def test_retry_on_status_code_success_on_last_retry( + self, base_url: str, mocker: MockerFixture + ): + """Test retry on status code from status_forcelist, succeeding on the last attempt.""" + retry_config = HttpxRetry(status=2, status_forcelist=[503, 500], backoff_factor=0.5) + transport = HttpxRetryTransport(retry=retry_config) + client = httpx.AsyncClient(transport=transport) + + route = respx.post(base_url).mock(side_effect=[ + httpx.Response(503, text="Attempt 1 Failed"), + httpx.Response(500, text="Attempt 2 Failed"), + httpx.Response(200, text="Attempt 3 Success"), + ]) + + mock_sleep = mocker.patch('asyncio.sleep', return_value=None) + response = await client.post(base_url) + + assert response.status_code == 200 + assert response.text == "Attempt 3 Success" + assert route.call_count == 3 + assert mock_sleep.call_count == 2 + # Check sleep calls (backoff_factor is 0.5) + mock_sleep.assert_has_calls([call(0.0), call(1.0)]) + + @pytest.mark.asyncio + @respx.mock + async def test_retry_exhausted_returns_last_response( + self, base_url: str, mocker: MockerFixture + ): + """Test that the last response is returned when retries are exhausted.""" + retry_config = HttpxRetry(status=1, status_forcelist=[500], backoff_factor=0) + transport = HttpxRetryTransport(retry=retry_config) + client = httpx.AsyncClient(transport=transport) + + route = respx.post(base_url).mock(side_effect=[ + httpx.Response(500, text="Attempt 1 Failed"), + httpx.Response(500, text="Attempt 2 Failed (Final)"), + # Should stop after previous response + httpx.Response(200, text="This should not be reached"), + ]) + + 
mock_sleep = mocker.patch('asyncio.sleep', return_value=None) + response = await client.post(base_url) + + assert response.status_code == 500 + assert response.text == "Attempt 2 Failed (Final)" + assert route.call_count == 2 # Initial call + 1 retry + assert mock_sleep.call_count == 1 # Slept before the single retry + + @pytest.mark.asyncio + @respx.mock + async def test_retry_after_header_seconds(self, base_url: str, mocker: MockerFixture): + """Test respecting Retry-After header with seconds value.""" + retry_config = HttpxRetry(status=1, respect_retry_after_header=True, backoff_factor=100) + transport = HttpxRetryTransport(retry=retry_config) + client = httpx.AsyncClient(transport=transport) + + route = respx.post(base_url).mock(side_effect=[ + httpx.Response(429, text="Too Many Requests", headers={'Retry-After': '10'}), + httpx.Response(200, text="OK"), + ]) + + mock_sleep = mocker.patch('asyncio.sleep', return_value=None) + response = await client.post(base_url) + + assert response.status_code == 200 + assert route.call_count == 2 + assert mock_sleep.call_count == 1 + # Assert sleep was called with the value from Retry-After header + mock_sleep.assert_called_once_with(10.0) + + @pytest.mark.asyncio + @respx.mock + async def test_retry_after_header_http_date(self, base_url: str, mocker: MockerFixture): + """Test respecting Retry-After header with an HTTP-date value.""" + retry_config = HttpxRetry(status=1, respect_retry_after_header=True, backoff_factor=100) + transport = HttpxRetryTransport(retry=retry_config) + client = httpx.AsyncClient(transport=transport) + + # Calculate a future time and format as HTTP-date + retry_delay_seconds = 60 + time_at_request = time.time() + retry_time = time_at_request + retry_delay_seconds + http_date = email.utils.formatdate(retry_time) + + route = respx.post(base_url).mock(side_effect=[ + httpx.Response(503, text="Maintenance", headers={'Retry-After': http_date}), + httpx.Response(200, text="OK"), + ]) + + mock_sleep = mocker.patch('asyncio.sleep', return_value=None) + # Patch time.time() within the test context to control the baseline for date calculation + # Set the mock time to be *just before* the Retry-After time + mocker.patch('time.time', return_value=time_at_request) + response = await client.post(base_url) + + assert response.status_code == 200 + assert route.call_count == 2 + assert mock_sleep.call_count == 1 + # Check that sleep was called with approximately the correct delay + # Allow for small floating point inaccuracies + mock_sleep.assert_called_once() + args, _ = mock_sleep.call_args + assert args[0] == pytest.approx(retry_delay_seconds, abs=2) + + @pytest.mark.asyncio + @respx.mock + async def test_retry_after_ignored_when_disabled(self, base_url: str, mocker: MockerFixture): + """Test Retry-After header is ignored if `respect_retry_after_header` is `False`.""" + retry_config = HttpxRetry( + status=3, respect_retry_after_header=False, status_forcelist=[429], + backoff_factor=0.5, backoff_max=10) + transport = HttpxRetryTransport(retry=retry_config) + client = httpx.AsyncClient(transport=transport) + + route = respx.post(base_url).mock(side_effect=[ + httpx.Response(429, text="Too Many Requests", headers={'Retry-After': '60'}), + httpx.Response(429, text="Too Many Requests", headers={'Retry-After': '60'}), + httpx.Response(429, text="Too Many Requests", headers={'Retry-After': '60'}), + httpx.Response(200, text="OK"), + ]) + + mock_sleep = mocker.patch('asyncio.sleep', return_value=None) + response = await client.post(base_url) + + 
assert response.status_code == 200 + assert route.call_count == 4 + assert mock_sleep.call_count == 3 + + # Assert sleep was called with the calculated backoff times: + # After first attempt: delay = 0 + # After retry 1 (attempt 2): delay = 0.5 * (2**(2-1)) = 0.5 * 2 = 1.0 + # After retry 2 (attempt 3): delay = 0.5 * (2**(3-1)) = 0.5 * 4 = 2.0 + expected_sleeps = [call(0), call(1.0), call(2.0)] + mock_sleep.assert_has_calls(expected_sleeps) + + @pytest.mark.asyncio + @respx.mock + async def test_retry_after_header_missing_backoff_fallback( + self, base_url: str, mocker: MockerFixture + ): + """Test Retry-After header is ignored if `respect_retry_after_header`is `True` but header is + not set.""" + retry_config = HttpxRetry( + status=3, respect_retry_after_header=True, status_forcelist=[429], + backoff_factor=0.5, backoff_max=10) + transport = HttpxRetryTransport(retry=retry_config) + client = httpx.AsyncClient(transport=transport) + + route = respx.post(base_url).mock(side_effect=[ + httpx.Response(429, text="Too Many Requests"), + httpx.Response(429, text="Too Many Requests"), + httpx.Response(429, text="Too Many Requests"), + httpx.Response(200, text="OK"), + ]) + + mock_sleep = mocker.patch('asyncio.sleep', return_value=None) + response = await client.post(base_url) + + assert response.status_code == 200 + assert route.call_count == 4 + assert mock_sleep.call_count == 3 + + # Assert sleep was called with the calculated backoff times: + # After first attempt: delay = 0 + # After retry 1 (attempt 2): delay = 0.5 * (2**(2-1)) = 0.5 * 2 = 1.0 + # After retry 2 (attempt 3): delay = 0.5 * (2**(3-1)) = 0.5 * 4 = 2.0 + expected_sleeps = [call(0), call(1.0), call(2.0)] + mock_sleep.assert_has_calls(expected_sleeps) + + @pytest.mark.asyncio + @respx.mock + async def test_exponential_backoff(self, base_url: str, mocker: MockerFixture): + """Test that sleep time increases exponentially with `backoff_factor`.""" + # status=3 allows 3 retries (attempts 2, 3, 4) + retry_config = HttpxRetry( + status=3, status_forcelist=[500], backoff_factor=0.1, backoff_max=10.0) + transport = HttpxRetryTransport(retry=retry_config) + client = httpx.AsyncClient(transport=transport) + + route = respx.post(base_url).mock(side_effect=[ + httpx.Response(500, text="Fail 1"), + httpx.Response(500, text="Fail 2"), + httpx.Response(500, text="Fail 3"), + httpx.Response(200, text="Success"), + ]) + + mock_sleep = mocker.patch('asyncio.sleep', return_value=None) + response = await client.post(base_url) + + assert response.status_code == 200 + assert route.call_count == 4 + assert mock_sleep.call_count == 3 + + # Check expected backoff times: + # After first attempt: delay = 0 + # After retry 1 (attempt 2): delay = 0.1 * (2**(2-1)) = 0.1 * 2 = 0.2 + # After retry 2 (attempt 3): delay = 0.1 * (2**(3-1)) = 0.1 * 4 = 0.4 + expected_sleeps = [call(0), call(0.2), call(0.4)] + mock_sleep.assert_has_calls(expected_sleeps) + + @pytest.mark.asyncio + @respx.mock + async def test_backoff_max(self, base_url: str, mocker: MockerFixture): + """Test that backoff time respects `backoff_max`.""" + # status=4 allows 4 retries. backoff_factor=1 causes rapid increase. 
+ retry_config = HttpxRetry( + status=4, status_forcelist=[500], backoff_factor=1, backoff_max=3.0) + transport = HttpxRetryTransport(retry=retry_config) + client = httpx.AsyncClient(transport=transport) + + route = respx.post(base_url).mock(side_effect=[ + httpx.Response(500, text="Fail 1"), + httpx.Response(500, text="Fail 2"), + httpx.Response(500, text="Fail 2"), + httpx.Response(500, text="Fail 4"), + httpx.Response(200, text="Success"), + ]) + + mock_sleep = mocker.patch('asyncio.sleep', return_value=None) + response = await client.post(base_url) + + assert response.status_code == 200 + assert route.call_count == 5 + assert mock_sleep.call_count == 4 + + # Check expected backoff times: + # After first attempt: delay = 0 + # After retry 1 (attempt 2): delay = 1*(2**(2-1)) = 2. Clamped by max(0, min(3.0, 2)) = 2.0 + # After retry 2 (attempt 3): delay = 1*(2**(3-1)) = 4. Clamped by max(0, min(3.0, 4)) = 3.0 + # After retry 3 (attempt 4): delay = 1*(2**(4-1)) = 8. Clamped by max(0, min(3.0, 8)) = 3.0 + expected_sleeps = [call(0.0), call(2.0), call(3.0), call(3.0)] + mock_sleep.assert_has_calls(expected_sleeps) + + @pytest.mark.asyncio + @respx.mock + async def test_backoff_jitter(self, base_url: str, mocker: MockerFixture): + """Test that `backoff_jitter` adds randomness within bounds.""" + retry_config = HttpxRetry( + status=3, status_forcelist=[500], backoff_factor=0.2, backoff_jitter=0.1) + transport = HttpxRetryTransport(retry=retry_config) + client = httpx.AsyncClient(transport=transport) + + route = respx.post(base_url).mock(side_effect=[ + httpx.Response(500, text="Fail 1"), + httpx.Response(500, text="Fail 2"), + httpx.Response(500, text="Fail 3"), + httpx.Response(200, text="Success"), + ]) + + mock_sleep = mocker.patch('asyncio.sleep', return_value=None) + response = await client.post(base_url) + + assert response.status_code == 200 + assert route.call_count == 4 + assert mock_sleep.call_count == 3 + + # Check expected backoff times are within the expected range [base - jitter, base + jitter] + # After first attempt: delay = 0 + # After retry 1 (attempt 2): delay = 0.2 * (2**(2-1)) = 0.2 * 2 = 0.4 +/- 0.1 + # After retry 2 (attempt 3): delay = 0.2 * (2**(3-1)) = 0.2 * 4 = 0.8 +/- 0.1 + expected_sleeps = [ + call(pytest.approx(0.0, abs=0.1)), + call(pytest.approx(0.4, abs=0.1)), + call(pytest.approx(0.8, abs=0.1)) + ] + mock_sleep.assert_has_calls(expected_sleeps) + + @pytest.mark.asyncio + @respx.mock + async def test_error_not_retryable(self, base_url): + """Test that non-HTTP errors are raised immediately if not retryable.""" + retry_config = HttpxRetry(status=3) + transport = HttpxRetryTransport(retry=retry_config) + client = httpx.AsyncClient(transport=transport) + + # Mock a connection error + route = respx.post(base_url).mock( + side_effect=repeat(httpx.ConnectError("Connection failed"))) + + with pytest.raises(httpx.ConnectError, match="Connection failed"): + await client.post(base_url) + + assert route.call_count == 1 + + +class TestHttpxRetry(): + _TEST_REQUEST = httpx.Request('POST', _TEST_URL) + + def test_httpx_retry_copy(self, base_url): + """Test that `HttpxRetry.copy()` creates a deep copy.""" + original = HttpxRetry(status=5, status_forcelist=[500, 503], backoff_factor=0.5) + original.history.append((base_url, None, None)) # Add something mutable + + copied = original.copy() + + # Assert they are different objects + assert original is not copied + assert original.history is not copied.history + + # Assert values are the same initially + assert copied.status == 
original.status + assert copied.status_forcelist == original.status_forcelist + assert copied.backoff_factor == original.backoff_factor + assert len(copied.history) == 1 + + # Modify the copy and check original is unchanged + copied.status = 1 + copied.status_forcelist = [404] + copied.history.append((base_url, None, None)) + + assert original.status == 5 + assert original.status_forcelist == [500, 503] + assert len(original.history) == 1 + + def test_parse_retry_after_seconds(self): + retry = HttpxRetry() + assert retry._parse_retry_after('10') == 10.0 + assert retry._parse_retry_after(' 30 ') == 30.0 + + + def test_parse_retry_after_http_date(self, mocker: MockerFixture): + mocker.patch('time.time', return_value=1000.0) + retry = HttpxRetry() + # Date string representing 1015 seconds since epoch + http_date = email.utils.formatdate(1015.0) + # time.time() is mocked to 1000.0, so delay should be 15s + assert retry._parse_retry_after(http_date) == pytest.approx(15.0) + + def test_parse_retry_after_past_http_date(self, mocker: MockerFixture): + """Test that a past date results in 0 seconds.""" + mocker.patch('time.time', return_value=1000.0) + retry = HttpxRetry() + http_date = email.utils.formatdate(990.0) # 10s in the past + assert retry._parse_retry_after(http_date) == 0.0 + + def test_parse_retry_after_invalid_date(self): + retry = HttpxRetry() + with pytest.raises(httpx.RemoteProtocolError, match='Invalid Retry-After header'): + retry._parse_retry_after('Invalid Date Format') + + def test_get_backoff_time_calculation(self): + retry = HttpxRetry(status=6, status_forcelist=[503], backoff_factor=0.5, backoff_max=10.0) + response = httpx.Response(503) + # No history -> attempt 1 -> no backoff before first request + # Note: get_backoff_time() is typically called *before* the *next* request, + # so history length reflects completed attempts. 
+ assert retry.get_backoff_time() == 0.0 + + # Simulate attempt 1 completed + retry.increment(self._TEST_REQUEST, response) + # History len 1, attempt 2 -> base case 0 + assert retry.get_backoff_time() == pytest.approx(0) + + # Simulate attempt 2 completed + retry.increment(self._TEST_REQUEST, response) + # History len 2, attempt 3 -> 0.5*(2^1) = 1.0 + assert retry.get_backoff_time() == pytest.approx(1.0) + + # Simulate attempt 3 completed + retry.increment(self._TEST_REQUEST, response) + # History len 3, attempt 4 -> 0.5*(2^2) = 2.0 + assert retry.get_backoff_time() == pytest.approx(2.0) + + # Simulate attempt 4 completed + retry.increment(self._TEST_REQUEST, response) + # History len 4, attempt 5 -> 0.5*(2^3) = 4.0 + assert retry.get_backoff_time() == pytest.approx(4.0) + + # Simulate attempt 5 completed + retry.increment(self._TEST_REQUEST, response) + # History len 5, attempt 6 -> 0.5*(2^4) = 8.0 + assert retry.get_backoff_time() == pytest.approx(8.0) + + # Simulate attempt 6 completed + retry.increment(self._TEST_REQUEST, response) + # History len 6, attempt 7 -> 0.5*(2^4) = 10.0 + assert retry.get_backoff_time() == pytest.approx(10.0) From b3aba373d643c77b0a77db02e72faab512d5a4f7 Mon Sep 17 00:00:00 2001 From: Jonathan Edey Date: Mon, 14 Apr 2025 17:43:08 -0400 Subject: [PATCH 3/4] Updated unit tests for HTTPX request errors --- tests/test_messaging.py | 69 +++++++++++++---------------------------- 1 file changed, 21 insertions(+), 48 deletions(-) diff --git a/tests/test_messaging.py b/tests/test_messaging.py index aee3ae4e..0200c11e 100644 --- a/tests/test_messaging.py +++ b/tests/test_messaging.py @@ -17,6 +17,7 @@ from itertools import chain, repeat import json import numbers +import httpx import respx from googleapiclient import http @@ -2100,54 +2101,26 @@ async def test_send_each_async_error_500_pass_on_retry_config(self): assert all([r.success for r in batch_response.responses]) assert not any([r.exception for r in batch_response.responses]) - # @respx.mock - # @pytest.mark.asyncio - # async def test_send_each_async_error_request(self): - # payload = json.dumps({ - # 'error': { - # 'status': 'INTERNAL', - # 'message': 'test INTERNAL error', - # 'details': [ - # { - # '@type': 'type.googleapis.com/google.firebase.fcm.v1.FcmError', - # 'errorCode': 'SOME_UNKNOWN_CODE', - # }, - # ], - # } - # }) - # responses = chain( - # [ - # httpx.ConnectError("Test request error", request=httpx.Request('POST', 'URL')) - # # respx.MockResponse(500, http_version='HTTP/2', content=payload), - # ], - # # repeat( - # respx.MockResponse(200, http_version='HTTP/2', json={'name': 'message-id1'})), - # # respx.MockResponse(200, http_version='HTTP/2', json={'name': 'message-id3'}), - # ) - - # # responses = repeat(respx.MockResponse(500, http_version='HTTP/2', content=payload)) - - # msg1 = messaging.Message(topic='foo1') - # route = respx.request( - # 'POST', - # 'https://fcm.googleapis.com/v1/projects/explicit-project-id/messages:send' - # ).mock(side_effect=responses) - # batch_response = await messaging.send_each_async([msg1], dry_run=True) - - # assert route.call_count == 1 - # assert batch_response.success_count == 0 - # assert batch_response.failure_count == 1 - # assert len(batch_response.responses) == 1 - # exception = batch_response.responses[0].exception - # assert isinstance(exception, exceptions.UnavailableError) - - # # assert route.call_count == 4 - # # assert batch_response.success_count == 1 - # # assert batch_response.failure_count == 0 - # # assert len(batch_response.responses) == 1 
- # # assert [r.message_id for r in batch_response.responses] == ['message-id1'] - # # assert all([r.success for r in batch_response.responses]) - # # assert not any([r.exception for r in batch_response.responses]) + @respx.mock + @pytest.mark.asyncio + async def test_send_each_async_request_error(self): + responses = httpx.ConnectError("Test request error", request=httpx.Request( + 'POST', + 'https://fcm.googleapis.com/v1/projects/explicit-project-id/messages:send')) + + msg1 = messaging.Message(topic='foo1') + route = respx.request( + 'POST', + 'https://fcm.googleapis.com/v1/projects/explicit-project-id/messages:send' + ).mock(side_effect=responses) + batch_response = await messaging.send_each_async([msg1], dry_run=True) + + assert route.call_count == 1 + assert batch_response.success_count == 0 + assert batch_response.failure_count == 1 + assert len(batch_response.responses) == 1 + exception = batch_response.responses[0].exception + assert isinstance(exception, exceptions.UnavailableError) @pytest.mark.parametrize('status', HTTP_ERROR_CODES) def test_send_each_detailed_error(self, status): From 0b976cb379de569aa6f3542709645df01ced82f3 Mon Sep 17 00:00:00 2001 From: jonathanedey Date: Wed, 14 May 2025 12:24:47 -0400 Subject: [PATCH 4/4] Address review comments --- firebase_admin/_retry.py | 59 ++++++++++--------------------------- firebase_admin/messaging.py | 14 --------- tests/test_retry.py | 39 +++++++++++++----------- 3 files changed, 36 insertions(+), 76 deletions(-) diff --git a/firebase_admin/_retry.py b/firebase_admin/_retry.py index 79c8c8f8..ef330cbd 100644 --- a/firebase_admin/_retry.py +++ b/firebase_admin/_retry.py @@ -34,23 +34,18 @@ class HttpxRetry: """HTTPX based retry config""" - # TODO: Decide - # urllib3.Retry ignores the status_forcelist when respecting Retry-After header - # Only 413, 429 and 503 errors are retried with the Retry-After header. - # Should we do the same? - # Default status codes to be used for ``status_forcelist`` + # Status codes to be used for respecting `Retry-After` header RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503]) - #: Default maximum backoff time. + # Default maximum backoff time. 
DEFAULT_BACKOFF_MAX = 120 def __init__( self, - status: int = 10, + max_retries: int = 10, status_forcelist: Optional[List[int]] = None, backoff_factor: float = 0, backoff_max: float = DEFAULT_BACKOFF_MAX, - raise_on_status: bool = False, backoff_jitter: float = 0, history: Optional[List[Tuple[ httpx.Request, @@ -59,11 +54,10 @@ def __init__( ]]] = None, respect_retry_after_header: bool = False, ) -> None: - self.status = status + self.retries_left = max_retries self.status_forcelist = status_forcelist self.backoff_factor = backoff_factor self.backoff_max = backoff_max - self.raise_on_status = raise_on_status self.backoff_jitter = backoff_jitter if history: self.history = history @@ -90,16 +84,10 @@ def is_retryable_response(self, response: httpx.Response) -> bool: return False - # Placeholder for exception retrying - def is_retryable_error(self, error: Exception): - """Determine if the error implies that the request should be retired if possible.""" - logger.debug(error) - return False - def is_exhausted(self) -> bool: """Determine if there are anymore more retires.""" - # status count is negative - return self.status < 0 + # retries_left is negative + return self.retries_left < 0 # Identical implementation of `urllib3.Retry.parse_retry_after()` def _parse_retry_after(self, retry_after_header: str) -> float | None: @@ -111,7 +99,6 @@ def _parse_retry_after(self, retry_after_header: str) -> float | None: else: retry_date_tuple = email.utils.parsedate_tz(retry_after_header) if retry_date_tuple is None: - # TODO: Verify if this is the appropriate way to handle this. raise httpx.RemoteProtocolError(f"Invalid Retry-After header: {retry_after_header}") retry_date = email.utils.mktime_tz(retry_date_tuple) @@ -167,45 +154,29 @@ def increment( error: Optional[Exception] = None ) -> None: """Update the retry state based on request attempt.""" - if response and self.is_retryable_response(response): - self.status -= 1 + self.retries_left -= 1 self.history.append((request, response, error)) -# TODO: Remove comments -# Note - This implementation currently covers: -# - basic retires for pre-defined status errors -# - applying retry backoff and backoff jitter -# - ability to respect a response's retry-after header class HttpxRetryTransport(httpx.AsyncBaseTransport): """HTTPX transport with retry logic.""" - # DEFAULT_RETRY = HttpxRetry( - # connect=1, read=1, status=4, status_forcelist=[500, 503], - # raise_on_status=False, backoff_factor=0.5, allowed_methods=None - # ) - DEFAULT_RETRY = HttpxRetry(status=4, status_forcelist=[500, 503], backoff_factor=0.5) + DEFAULT_RETRY = HttpxRetry(max_retries=4, status_forcelist=[500, 503], backoff_factor=0.5) - # We could also support passing kwargs here def __init__(self, retry: HttpxRetry = DEFAULT_RETRY, **kwargs) -> None: self._retry = retry transport_kwargs = kwargs.copy() transport_kwargs.update({'retries': 0, 'http2': True}) - # We should use a full AsyncHTTPTransport under the hood since that is - # fully implemented. We could consider making this class extend a - # AsyncHTTPTransport instead and use the parent class's methods to handle - # requests. We sould also ensure that that transport's internal retry is - # not enabled. + # We use a full AsyncHTTPTransport under the hood that is already + # set up to handle requests. We also insure that that transport's internal + # retries are not allowed. 
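# [Editor's sketch, not part of the patch] What the wrapping below amounts to:
# httpx's own `retries` option on AsyncHTTPTransport only re-attempts failed
# connection establishment, so it is pinned to 0 here and HttpxRetryTransport
# stays the single owner of retry behaviour; http2=True mirrors the transport
# kwargs set in this patch.
import httpx

inner_transport = httpx.AsyncHTTPTransport(retries=0, http2=True)
# An AsyncClient would normally receive the HttpxRetryTransport wrapper, which
# delegates the actual I/O to a transport configured like `inner_transport`.
client = httpx.AsyncClient(transport=inner_transport)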
self._wrapped_transport = httpx.AsyncHTTPTransport(**transport_kwargs) async def handle_async_request(self, request: httpx.Request) -> httpx.Response: return await self._dispatch_with_retry( request, self._wrapped_transport.handle_async_request) - # Two types of retries - # - Status code (500s, redirect) - # - Error code (read, connect, other) async def _dispatch_with_retry( self, request: httpx.Request, @@ -213,7 +184,7 @@ async def _dispatch_with_retry( ) -> httpx.Response: """Sends a request with retry logic using a provided dispatch method.""" # This request config is used across all requests that use this transport and therefore - # needs to be copied to be used for just this request ans it's retries. + # needs to be copied to be used for just this request and it's retries. retry = self._retry.copy() # First request response, error = None, None @@ -238,16 +209,16 @@ async def _dispatch_with_retry( if response and not retry.is_retryable_response(response): return response - if error and not retry.is_retryable_error(error): + if error: raise error - retry.increment(request, response) + retry.increment(request, response, error) if response: return response if error: raise error - raise Exception('_dispatch_with_retry() ended with no response or exception') + raise AssertionError('_dispatch_with_retry() ended with no response or exception') async def aclose(self) -> None: await self._wrapped_transport.aclose() diff --git a/firebase_admin/messaging.py b/firebase_admin/messaging.py index e886b25c..7f747db1 100644 --- a/firebase_admin/messaging.py +++ b/firebase_admin/messaging.py @@ -379,15 +379,6 @@ def exception(self): """A ``FirebaseError`` if an error occurs while sending the message to the FCM service.""" return self._exception -# Auth Flow -# TODO: Remove comments -# The aim here is to be able to get auth credentials right before the request is sent. -# This is similar to what is done in transport.requests.AuthorizedSession(). -# We can then pass this in at the client level. - -# Notes: -# - This implementations does not cover timeouts on requests sent to refresh credentials. -# - Uses HTTP/1 and a blocking credential for refreshing. class GoogleAuthCredentialFlow(httpx.Auth): """Google Auth Credential Auth Flow""" def __init__(self, credential: credentials.Credentials): @@ -679,11 +670,6 @@ def _handle_batch_error(self, error): return _gapic_utils.handle_platform_error_from_googleapiclient( error, _MessagingService._build_fcm_error_googleapiclient) - # TODO: Remove comments - # We should be careful to clean up the httpx clients. - # Since we are using an async client we must also close in async. However we can sync wrap this. - # The close method is called by the app on shutdown/clean-up of each service. We don't seem to - # make use of this much elsewhere. 
def close(self) -> None: asyncio.run(self._async_client.aclose()) diff --git a/tests/test_retry.py b/tests/test_retry.py index b29b5fef..751fdea7 100644 --- a/tests/test_retry.py +++ b/tests/test_retry.py @@ -37,7 +37,7 @@ class TestHttpxRetryTransport(): @respx.mock async def test_no_retry_on_success(self, base_url: str, mocker: MockerFixture): """Test that a successful response doesn't trigger retries.""" - retry_config = HttpxRetry(status=3, status_forcelist=[500]) + retry_config = HttpxRetry(max_retries=3, status_forcelist=[500]) transport = HttpxRetryTransport(retry=retry_config) client = httpx.AsyncClient(transport=transport) @@ -55,7 +55,7 @@ async def test_no_retry_on_success(self, base_url: str, mocker: MockerFixture): @respx.mock async def test_no_retry_on_non_retryable_status(self, base_url: str, mocker: MockerFixture): """Test that a non-retryable error status doesn't trigger retries.""" - retry_config = HttpxRetry(status=3, status_forcelist=[500, 503]) + retry_config = HttpxRetry(max_retries=3, status_forcelist=[500, 503]) transport = HttpxRetryTransport(retry=retry_config) client = httpx.AsyncClient(transport=transport) @@ -75,7 +75,7 @@ async def test_retry_on_status_code_success_on_last_retry( self, base_url: str, mocker: MockerFixture ): """Test retry on status code from status_forcelist, succeeding on the last attempt.""" - retry_config = HttpxRetry(status=2, status_forcelist=[503, 500], backoff_factor=0.5) + retry_config = HttpxRetry(max_retries=2, status_forcelist=[503, 500], backoff_factor=0.5) transport = HttpxRetryTransport(retry=retry_config) client = httpx.AsyncClient(transport=transport) @@ -101,7 +101,7 @@ async def test_retry_exhausted_returns_last_response( self, base_url: str, mocker: MockerFixture ): """Test that the last response is returned when retries are exhausted.""" - retry_config = HttpxRetry(status=1, status_forcelist=[500], backoff_factor=0) + retry_config = HttpxRetry(max_retries=1, status_forcelist=[500], backoff_factor=0) transport = HttpxRetryTransport(retry=retry_config) client = httpx.AsyncClient(transport=transport) @@ -124,7 +124,8 @@ async def test_retry_exhausted_returns_last_response( @respx.mock async def test_retry_after_header_seconds(self, base_url: str, mocker: MockerFixture): """Test respecting Retry-After header with seconds value.""" - retry_config = HttpxRetry(status=1, respect_retry_after_header=True, backoff_factor=100) + retry_config = HttpxRetry( + max_retries=1, respect_retry_after_header=True, backoff_factor=100) transport = HttpxRetryTransport(retry=retry_config) client = httpx.AsyncClient(transport=transport) @@ -146,7 +147,8 @@ async def test_retry_after_header_seconds(self, base_url: str, mocker: MockerFix @respx.mock async def test_retry_after_header_http_date(self, base_url: str, mocker: MockerFixture): """Test respecting Retry-After header with an HTTP-date value.""" - retry_config = HttpxRetry(status=1, respect_retry_after_header=True, backoff_factor=100) + retry_config = HttpxRetry( + max_retries=1, respect_retry_after_header=True, backoff_factor=100) transport = HttpxRetryTransport(retry=retry_config) client = httpx.AsyncClient(transport=transport) @@ -181,7 +183,7 @@ async def test_retry_after_header_http_date(self, base_url: str, mocker: MockerF async def test_retry_after_ignored_when_disabled(self, base_url: str, mocker: MockerFixture): """Test Retry-After header is ignored if `respect_retry_after_header` is `False`.""" retry_config = HttpxRetry( - status=3, respect_retry_after_header=False, 
status_forcelist=[429], + max_retries=3, respect_retry_after_header=False, status_forcelist=[429], backoff_factor=0.5, backoff_max=10) transport = HttpxRetryTransport(retry=retry_config) client = httpx.AsyncClient(transport=transport) @@ -215,7 +217,7 @@ async def test_retry_after_header_missing_backoff_fallback( """Test Retry-After header is ignored if `respect_retry_after_header`is `True` but header is not set.""" retry_config = HttpxRetry( - status=3, respect_retry_after_header=True, status_forcelist=[429], + max_retries=3, respect_retry_after_header=True, status_forcelist=[429], backoff_factor=0.5, backoff_max=10) transport = HttpxRetryTransport(retry=retry_config) client = httpx.AsyncClient(transport=transport) @@ -247,7 +249,7 @@ async def test_exponential_backoff(self, base_url: str, mocker: MockerFixture): """Test that sleep time increases exponentially with `backoff_factor`.""" # status=3 allows 3 retries (attempts 2, 3, 4) retry_config = HttpxRetry( - status=3, status_forcelist=[500], backoff_factor=0.1, backoff_max=10.0) + max_retries=3, status_forcelist=[500], backoff_factor=0.1, backoff_max=10.0) transport = HttpxRetryTransport(retry=retry_config) client = httpx.AsyncClient(transport=transport) @@ -278,7 +280,7 @@ async def test_backoff_max(self, base_url: str, mocker: MockerFixture): """Test that backoff time respects `backoff_max`.""" # status=4 allows 4 retries. backoff_factor=1 causes rapid increase. retry_config = HttpxRetry( - status=4, status_forcelist=[500], backoff_factor=1, backoff_max=3.0) + max_retries=4, status_forcelist=[500], backoff_factor=1, backoff_max=3.0) transport = HttpxRetryTransport(retry=retry_config) client = httpx.AsyncClient(transport=transport) @@ -310,7 +312,7 @@ async def test_backoff_max(self, base_url: str, mocker: MockerFixture): async def test_backoff_jitter(self, base_url: str, mocker: MockerFixture): """Test that `backoff_jitter` adds randomness within bounds.""" retry_config = HttpxRetry( - status=3, status_forcelist=[500], backoff_factor=0.2, backoff_jitter=0.1) + max_retries=3, status_forcelist=[500], backoff_factor=0.2, backoff_jitter=0.1) transport = HttpxRetryTransport(retry=retry_config) client = httpx.AsyncClient(transport=transport) @@ -343,7 +345,7 @@ async def test_backoff_jitter(self, base_url: str, mocker: MockerFixture): @respx.mock async def test_error_not_retryable(self, base_url): """Test that non-HTTP errors are raised immediately if not retryable.""" - retry_config = HttpxRetry(status=3) + retry_config = HttpxRetry(max_retries=3) transport = HttpxRetryTransport(retry=retry_config) client = httpx.AsyncClient(transport=transport) @@ -362,7 +364,7 @@ class TestHttpxRetry(): def test_httpx_retry_copy(self, base_url): """Test that `HttpxRetry.copy()` creates a deep copy.""" - original = HttpxRetry(status=5, status_forcelist=[500, 503], backoff_factor=0.5) + original = HttpxRetry(max_retries=5, status_forcelist=[500, 503], backoff_factor=0.5) original.history.append((base_url, None, None)) # Add something mutable copied = original.copy() @@ -372,17 +374,17 @@ def test_httpx_retry_copy(self, base_url): assert original.history is not copied.history # Assert values are the same initially - assert copied.status == original.status + assert copied.retries_left == original.retries_left assert copied.status_forcelist == original.status_forcelist assert copied.backoff_factor == original.backoff_factor assert len(copied.history) == 1 # Modify the copy and check original is unchanged - copied.status = 1 + copied.retries_left = 1 
copied.status_forcelist = [404] copied.history.append((base_url, None, None)) - assert original.status == 5 + assert original.retries_left == 5 assert original.status_forcelist == [500, 503] assert len(original.history) == 1 @@ -413,7 +415,8 @@ def test_parse_retry_after_invalid_date(self): retry._parse_retry_after('Invalid Date Format') def test_get_backoff_time_calculation(self): - retry = HttpxRetry(status=6, status_forcelist=[503], backoff_factor=0.5, backoff_max=10.0) + retry = HttpxRetry( + max_retries=6, status_forcelist=[503], backoff_factor=0.5, backoff_max=10.0) response = httpx.Response(503) # No history -> attempt 1 -> no backoff before first request # Note: get_backoff_time() is typically called *before* the *next* request, @@ -447,5 +450,5 @@ def test_get_backoff_time_calculation(self): # Simulate attempt 6 completed retry.increment(self._TEST_REQUEST, response) - # History len 6, attempt 7 -> 0.5*(2^4) = 10.0 + # History len 6, attempt 7 -> 0.5*(2^5) = 16.0 Clamped to 10 assert retry.get_backoff_time() == pytest.approx(10.0)
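
Reviewer note: taken together, the series leaves `firebase_admin/_retry.py` with the API
sketched below. This is an illustrative, standalone example of how the pieces fit; the FCM
messaging client constructs the transport internally, so application code would not normally
do this, and the URL is a placeholder.

import asyncio
import httpx
from firebase_admin._retry import HttpxRetry, HttpxRetryTransport

async def main() -> None:
    retry = HttpxRetry(
        max_retries=4,                    # total retry budget
        status_forcelist=[500, 503],      # retried with exponential backoff
        backoff_factor=0.5,               # sleeps of 0s, 1s, 2s, 4s between attempts
        respect_retry_after_header=True,  # also retry 413/429/503 responses that carry
                                          # Retry-After, sleeping for the advertised delay
    )
    transport = HttpxRetryTransport(retry=retry)
    async with httpx.AsyncClient(transport=transport) as client:
        response = await client.post('https://example.invalid/send')  # placeholder URL
        print(response.status_code)

asyncio.run(main())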