diff --git a/.github/workflows/maintests.yml b/.github/workflows/maintests.yml index 678fb0e9..0b89fad1 100644 --- a/.github/workflows/maintests.yml +++ b/.github/workflows/maintests.yml @@ -19,23 +19,26 @@ jobs: build: env: AIWORKER_CACHE_HOME: ${{ github.workspace }}/.cache - TESTS_ONGOING: 1 - HORDE_SDK_TESTING: 1 - HORDE_MODEL_REFERENCE_MAKE_FOLDERS: 1 + TESTS_ONGOING: "1" + AI_HORDE_TESTING: "1" runs-on: ubuntu-latest strategy: matrix: - python: ["3.10", "3.11", "3.12", "3.13"] + python-version: + - "3.12" + - "3.13" steps: - - uses: actions/checkout@v3 - - name: Setup Python - uses: actions/setup-python@v4 + - uses: actions/checkout@v4 + - name: Install uv and set the python version + uses: astral-sh/setup-uv@v6 with: - python-version: ${{ matrix.python }} - - name: Install tox and any other packages - run: | - python -m pip install --upgrade pip - pip install --upgrade -r requirements.dev.txt - - name: Run unit tests - run: tox -e tests-no-api-calls + python-version: ${{ matrix.python-version }} + enable-cache: true + + - name: Install the project + run: uv sync --locked --all-extras --dev + + + - name: Run tests + run: uv run pytest tests --ignore-glob='**/*api_calls*' -m "not api_side_ci" diff --git a/.github/workflows/prtests.yml b/.github/workflows/prtests.yml index 899d250d..f226db97 100644 --- a/.github/workflows/prtests.yml +++ b/.github/workflows/prtests.yml @@ -1,7 +1,7 @@ name: Unstable Tests on: - pull_request_target: + pull_request: branches: - main types: @@ -22,25 +22,27 @@ jobs: build: env: AIWORKER_CACHE_HOME: ${{ github.workspace }}/.cache - TESTS_ONGOING: 1 - HORDE_SDK_TESTING: 1 - HORDE_MODEL_REFERENCE_MAKE_FOLDERS: 1 + TESTS_ONGOING: "1" + AI_HORDE_TESTING: "1" runs-on: ubuntu-latest strategy: matrix: - python: ["3.10", "3.11", "3.12", "3.13"] + python-version: + - "3.12" + - "3.13" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} - - name: Setup Python - uses: actions/setup-python@v4 + - name: Install uv and set the python version + uses: astral-sh/setup-uv@v6 with: - python-version: ${{ matrix.python }} - - name: Install tox and any other packages - run: | - python -m pip install --upgrade pip - pip install --upgrade -r requirements.dev.txt - - name: Run unit tests - run: tox -e tests-no-api-calls + python-version: ${{ matrix.python-version }} + enable-cache: true + + - name: Install the project + run: uv sync --locked --all-extras --dev + + - name: Run tests + run: uv run pytest tests --ignore-glob='**/*api_calls*' -m "not api_side_ci" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3409c9c7..94dbcf2c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -39,12 +39,11 @@ jobs: run: echo "SETUPTOOLS_SCM_PRETEND_VERSION=${{ steps.release.outputs.version }}" >> $GITHUB_ENV # We need python... 
- - name: "🐍 Set up Python 3.10" + - name: "🐍 Set up Python 3.13" if: ${{ steps.release.outputs.version != '' }} uses: actions/setup-python@v4 with: - python-version: "3.10" - + python-version: "3.13" # Install build deps - name: "🛠 Install pypa/build" if: ${{ steps.release.outputs.version != '' }} diff --git a/.gitignore b/.gitignore index 09b14c89..4432211c 100644 --- a/.gitignore +++ b/.gitignore @@ -155,3 +155,9 @@ tests/testing_result_images/* workers.txt ai_horde_codegen.py + +.codiumai.toml + +.venv-* +non_conforming_docstrings.json +missing_original_docstrings.json diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..2525f39d --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "docs/haidra-assets"] + path = docs/haidra-assets + url = https://github.com/Haidra-Org/haidra-assets diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3317cd2f..f3252a65 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,20 +1,18 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v5.0.0 + rev: v6.0.0 hooks: - id: check-yaml + exclude: ^mkdocs\.yml$ - id: end-of-file-fixer - id: trailing-whitespace -- repo: https://github.com/psf/black - rev: 24.10.0 - hooks: - - id: black - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.2 + rev: v0.14.10 hooks: - - id: ruff + - id: ruff-check + - id: ruff-format - repo: https://github.com/pre-commit/mirrors-mypy - rev: 'v1.13.0' + rev: v1.19.1 hooks: - id: mypy args: [., --strict, --ignore-missing-imports, --exclude=^codegen] @@ -22,12 +20,18 @@ repos: additional_dependencies: [ pytest, pydantic>=2.9.2, + logfire, types-Pillow, types-requests, types-pytz, types-setuptools, types-urllib3, types-aiofiles, + types-PyYAML, StrEnum, horde_model_reference>=2.0.0, ] +- repo: https://github.com/gauge-sh/tach-pre-commit + rev: v0.29.0 + hooks: + - id: tach diff --git a/.readthedocs.yaml b/.readthedocs.yaml index f75c81e9..4d36034b 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -4,6 +4,10 @@ # Required version: 2 +submodules: + include: all + recursive: true + # Set the version of Python and other tools you might need build: os: ubuntu-22.04 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b8f0e080..0345e83d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,56 +1,132 @@ # Contributing to horde_sdk +## Table of Contents + +- [Contributing to horde\_sdk](#contributing-to-horde_sdk) + - [Table of Contents](#table-of-contents) + - [Environment Management](#environment-management) + - [First time setup](#first-time-setup) + - [Code Quality Tools](#code-quality-tools) + - [Code Style and System Design](#code-style-and-system-design) + - [Testing](#testing) + - [When the API adds an endpoint or changes a model](#when-the-api-adds-an-endpoint-or-changes-a-model) + - [Verifying the horde SDK API surface](#verifying-the-horde-sdk-api-surface) + - [New Features/Pull Requests/Working on issues](#new-featurespull-requestsworking-on-issues) + - [Pull Request Do's and Don'ts](#pull-request-dos-and-donts) + - [Do](#do) + - [Don't](#dont) + - [Before Requesting Review](#before-requesting-review) + +## Environment Management + +[uv](https://github.com/astral-sh/uv/) is the suggested python environment management tool. + +### First time setup + +- Install uv, as described [here](https://github.com/astral-sh/uv/#installation). 
+- `uv python install 3.10 3.11 3.12 3.13`
+- `uv self update`
+- `uv sync --all-groups`
+- The `.venv/` directory will now be created with all project, development and documentation dependencies installed.
+  - Be sure to point your IDE to the python binary appropriate for your OS in this directory.
+
 ## Code Quality Tools

-* [tox](https://tox.wiki/)
-  - Creates virtual environments for CI or local pytest runs.
-  - Note that the CI does not current execute calls to the production API by default.
-  - Run `tox list` or see `tox.ini` for more info
-* [pre-commit](https://pre-commit.com/)
-  - Creates virtual environments for formatting and linting tools
-  - Run `pre-commit run --all-files` or see `.pre-commit-config.yaml` for more info.
-* [black](https://github.com/psf/black)
-  - Whitespace formatter
-* [ruff](https://github.com/astral-sh/ruff)
-  - Linting rules from a wide variety of selectable rule sets
-  - See `pyproject.toml` for the rules used.
-  - See all rules (but not necessarily used in the project) availible in rust [here](https://beta.ruff.rs/docs/rules/).
-* [mypy](https://mypy-lang.org/)
-  - Static type safety
-  - I recommending using the [mypy daemon](https://mypy.readthedocs.io/en/stable/mypy_daemon.html) instead of periodically running `pre-commit` (or `mypy` directly.).
-  - If you are using VSCode, I recommend the `matangover.mypy` extension, which implements this nicely.
+- [**tox**](https://tox.wiki/)
+  - Creates virtual environments for CI or local pytest runs.
+  - Note that the CI does not currently execute calls to the production API by default.
+  - Run `tox list` or see `tox.ini` for more info.
+- [**pre-commit**](https://pre-commit.com/)
+  - Creates virtual environments for formatting and linting tools.
+  - Run `pre-commit run --all-files` or see `.pre-commit-config.yaml` for more info.

-## Things to know
+> Note: Many of the tools below are run by `pre-commit` automatically, but can also be run manually if desired.

- * The `AI_HORDE_DEV_URL` environment variable overrides `AI_HORDE_URL`. This is useful for testing changes locally.
- * pytest files which end in `_api_calls.py` run last, and never run during the CI. It is currently incumbent on individual developers to confirm that these tests run successfully locally. In the future, part of the CI will be to spawn an AI-Horde and worker instances and test it there.
+- [**black**](https://github.com/psf/black)
+  - Whitespace/code style formatter.
+  - Run with `black .`
+- [**ruff**](https://github.com/astral-sh/ruff)
+  - Linting rules from a wide variety of selectable rule sets.
+  - See `pyproject.toml` for the rules used.
+  - See all rules available in ruff (not necessarily all used in this project) [here](https://beta.ruff.rs/docs/rules/).
+  - Run with `ruff check .`
+  - Note: When using autofixing (`ruff check . --fix`), changes may be made that require running black, which can then result in needing to run `ruff check . --fix` again.
+    - Consider running `black . && ruff check . --fix && black . && ruff check . --fix` to avoid this.
+- [**mypy**](https://mypy-lang.org/)
+  - Static type safety.
+  - I recommend using the [mypy daemon](https://mypy.readthedocs.io/en/stable/mypy_daemon.html) instead of periodically running `pre-commit` (or `mypy` directly); see the sketch after this list.
+- [**pyright**](https://github.com/microsoft/pyright)
+  - Shipped with VSCode by default (via the python extension `ms-python.vscode-pylance`).
+  - Suggested settings:
+    - `"python.analysis.typeCheckingMode": "off"`
+      - The pylance extension has certain opinionated type checking assertions which clash with mypy.
+      - For example, overriding an optional field to be non-optional is considered by pylance to be a type error because the field is invariant and the parent class could potentially set it to `None`. However, by convention in the SDK, this is a forbidden pattern.
+    - `"python.analysis.languageServerMode": "full"`
+    - `"python.testing.pytestEnabled": true`
+- [**tach**](https://github.com/gauge-sh/tach)
+  - Enforces internal namespace dependency constraints. This helps avoid circular dependencies and helps ensure implementations are in a logical place.
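A minimal sketch of driving the mypy daemon by hand. This is an assumed invocation, not part of the project docs: the flags simply mirror the `pre-commit` mypy hook configuration shown earlier in this changeset, and some flags may behave differently under daemon mode.

```bash
# Start (or reuse) the daemon and type-check the project incrementally;
# repeat runs only re-check changed files, which is much faster than a cold run.
dmypy run -- --strict --ignore-missing-imports --exclude '^codegen' .

# Stop the daemon when finished.
dmypy stop
```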
+## Code Style and System Design

-## Verifying the horde SDK API surface
+See the [style guide in the docs folder](docs/concepts/style_guide.md) or go to the [same place in the horde_sdk documentation](https://horde-sdk.readthedocs.io/en/latest/) for more information on the code style requirements and design patterns used in the SDK.

-You can run the following:
+## Testing

-```bash
-pytest -m "object_verify"
-```
+- horde_sdk uses [pytest](https://docs.pytest.org/en/stable/) for testing.
+- The `AI_HORDE_DEV_URL` environment variable overrides `AI_HORDE_URL`. This is useful for testing changes locally.
+- pytest files which end in `_api_calls.py` run last, and never run during the CI. It is currently incumbent on individual developers to confirm that these tests run successfully locally. In the future, part of the CI will be to spawn AI-Horde and worker instances and test against them.
+- **_Rationale_**: Local runs of the test suite benefit from a way to avoid running tests dependent on a live API.

-This will run the tests which validate the objects defined in the SDK are:
-- In the appropriate place
-- Match the live API (or if `AI_HORDE_DEV_URL` that version of the API)
-- That the models are exposed via `__init__.py`
-- And will run any other tests which ensure internal consistency.
-  - This generally does not include specific object validation beyond what can be automatically derived directly from the API docs or from general conventions from the SDK itself.
-  - If adding objects, you should add tests more specific to the expected functionality of that endpoint and the `object_verify` tests should only be treated as the bare-minimum.
+### When the API adds an endpoint or changes a model

-## When the API adds an endpoint or changes a model
 With the top level directory (the one that contains `pyproject.toml`) as your working directory:
+
 ```bash
 python horde_sdk/scripts/write_all_payload_examples_for_tests.py
 python horde_sdk/scripts/write_all_response_examples_for_tests.py
 python docs/build_docs.py
 ```
-This will update the data found in `tests/test_data/` from the default horde URL, or if any of the override environment variables are set, from there.
+
+This will update the data found in `tests/test_data/` from the default horde URL, or, if any of the override environment variables are set, from there. This includes writing example payloads and responses extrapolated from the live APIs. Running `build_docs.py` will update any automatically generated mkdocs documentation stubs or resources (such as the API Model <-> SDK Model map). Be sure to run the test suite (without any `*_api_calls.py` tests) after. You may also want to just start with `pytest -m "object_verify"` (see also the section on verifying the horde SDK API surface).
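A sketch of that local run, mirroring the invocation used by the CI workflows in this changeset; it assumes the uv-managed environment from the first-time setup (drop the `uv run` prefix if the `.venv` is already activated):

```bash
# Run the suite the same way CI does: skip the *_api_calls.py files and
# anything marked api_side_ci, so no live-API calls are made.
uv run pytest tests --ignore-glob='**/*api_calls*' -m "not api_side_ci"
```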
+
+### Verifying the horde SDK API surface
+
+You can run the following:
+
+```bash
+pytest -m "object_verify" -s
+```
+
+> Note: The `-s` flag is important as it allows you to see the output of the tests, which can be helpful for debugging. Often, the tests will also print out corrective actions to take if the tests fail.
+
+## New Features/Pull Requests/Working on issues
+
+The Horde ecosystem is a collaborative effort made possible by volunteers. We welcome all contributions, and permission is not needed to work on issues or submit pull requests. However, if there is already activity on an issue, consider reaching out to those involved to coordinate efforts and avoid duplicate or conflicting work. Additionally, please consider commenting on an issue to let others know you are working on it.
+
+### Pull Request Do's and Don'ts
+
+#### Do
+
+- Be bold in your contribution
+- Ensure your code complies with the [code style and system design](#code-style-and-system-design) guidelines
+- Open draft pull requests when you want early feedback on your approach
+- Include clear descriptions of changes made and the reasoning behind them
+- Include tests for any new features or bug fixes
+- Update documentation for new features or changes to existing functionality
+- Use descriptive commit messages consistent with the project commit history, especially for medium-to-large changesets.
+  - While it is possible we will squash commits before merging, descriptive commit messages are still helpful for review and keep open the possibility of rebasing instead.
+
+#### Don't
+
+- Make large sweeping changes unrelated to your primary goal
+- Include unnecessary changes like formatting or edits to unrelated files (open a separate PR for this)
+
+### Before Requesting Review
+
+- Ensure your PR is based on the latest `main` branch (rebase if out of date; see the sketch below)
+- Verify all tests and code quality checks pass against the intended API versions
+- Double-check that all necessary documentation is updated, including docstrings and the relevant markdown files in the `docs` directory.
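One possible way to run through that checklist locally, as a sketch only; it assumes `origin` points at the upstream repository and that the uv environment from the first-time setup is in place:

```bash
# Bring the branch up to date with main.
git fetch origin
git rebase origin/main

# Run the formatters, linters and type checks configured for the project.
pre-commit run --all-files

# Run the test suite without the live-API tests, as in CI.
uv run pytest tests --ignore-glob='**/*api_calls*' -m "not api_side_ci"
```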
diff --git a/docs/.pages b/docs/.pages index 69b687c5..15a7ae7c 100644 --- a/docs/.pages +++ b/docs/.pages @@ -3,8 +3,25 @@ nav: - getting_started.md - faq.md - examples.md - - api_to_sdk_map.md + - Definitions: haidra-assets/docs/definitions + - AI-Horde: + - API Model to SDK Class Map: ai-horde/api_to_sdk_map + - AI Horde Request/Job Lifecycle: haidra-assets/docs/workers + - Kudos Explanation: haidra-assets/docs/kudos + - Motivations: haidra-assets/docs/why + - Workers: + - Generations: worker/generations + - Worker Loop Diagram: haidra-assets/docs/worker_loop + - SDK Development: + - Developer's Getting Started: developers.md + - CONTRIBUTING.md (github link): 'https://github.com/Haidra-Org/horde-sdk/blob/main/CONTRIBUTING.md' + - Style Guide: concepts/style_guide.md + - Package Structure: concepts/package_structure.md + - Definitions: definitions.md + - SDK Client Design: concepts/client_design + - Backend Values Mappers: concepts/backend_values_mappers - horde_sdk + - GitHub Repo: 'https://github.com/Haidra-Org/horde-sdk' order: desc diff --git a/docs/ai-horde/api_to_sdk_map.md b/docs/ai-horde/api_to_sdk_map.md new file mode 100644 index 00000000..3afa30c3 --- /dev/null +++ b/docs/ai-horde/api_to_sdk_map.md @@ -0,0 +1,156 @@ +# AI-Horde API Model to SDK Class Map +This is a mapping of the AI-Horde API models (defined at [https://stablehorde.net/api/](https://stablehorde.net/api/), see also [the swagger doc](https://stablehorde.net/api/swagger.json)) to the SDK classes. + +## Payloads +| API Endpoint | HTTP Method | SDK Request Type | +| ------------ | ----------- | ---------------- | +| /v2/collection_by_name/{collection_name} | GET | [CollectionByNameRequest][horde_sdk.ai_horde_api.apimodels.collections.CollectionByNameRequest] | +| /v2/collections | GET | [AllCollectionsRequest][horde_sdk.ai_horde_api.apimodels.collections.AllCollectionsRequest] | +| /v2/collections | POST | [CreateCollectionRequest][horde_sdk.ai_horde_api.apimodels.collections.CreateCollectionRequest] | +| /v2/collections/{collection_id} | GET | [CollectionByIDRequest][horde_sdk.ai_horde_api.apimodels.collections.CollectionByIDRequest] | +| /v2/collections/{collection_id} | DELETE | [DeleteCollectionRequest][horde_sdk.ai_horde_api.apimodels.collections.DeleteCollectionRequest] | +| /v2/collections/{collection_id} | PATCH | [UpdateCollectionRequest][horde_sdk.ai_horde_api.apimodels.collections.UpdateCollectionRequest] | +| /v2/documents/privacy | GET | [AIHordeGetPrivacyPolicyRequest][horde_sdk.ai_horde_api.apimodels.documents.AIHordeGetPrivacyPolicyRequest] | +| /v2/documents/sponsors | GET | [AIHordeGetSponsorsRequest][horde_sdk.ai_horde_api.apimodels.documents.AIHordeGetSponsorsRequest] | +| /v2/documents/terms | GET | [AIHordeGetTermsRequest][horde_sdk.ai_horde_api.apimodels.documents.AIHordeGetTermsRequest] | +| /v2/filters | POST | [FilterPromptSuspicionRequest][horde_sdk.ai_horde_api.apimodels.filters.FilterPromptSuspicionRequest] | +| /v2/filters | GET | [FiltersListRequest][horde_sdk.ai_horde_api.apimodels.filters.FiltersListRequest] | +| /v2/filters | PUT | [PutNewFilterRequest][horde_sdk.ai_horde_api.apimodels.filters.PutNewFilterRequest] | +| /v2/filters/regex | GET | [FilterRegexRequest][horde_sdk.ai_horde_api.apimodels.filters.FilterRegexRequest] | +| /v2/filters/{filter_id} | DELETE | [DeleteFilterRequest][horde_sdk.ai_horde_api.apimodels.filters.DeleteFilterRequest] | +| /v2/filters/{filter_id} | PATCH | [PatchExistingFilter][horde_sdk.ai_horde_api.apimodels.filters.PatchExistingFilter] | +| 
/v2/filters/{filter_id} | GET | [SingleFilterRequest][horde_sdk.ai_horde_api.apimodels.filters.SingleFilterRequest] | +| /v2/find_user | GET | [FindUserRequest][horde_sdk.ai_horde_api.apimodels.find_user.FindUserRequest] | +| /v2/generate/async | POST | [ImageGenerateAsyncRequest][horde_sdk.ai_horde_api.apimodels.generate.async_.ImageGenerateAsyncRequest] | +| /v2/generate/check/{id} | GET | [ImageGenerateCheckRequest][horde_sdk.ai_horde_api.apimodels.generate.check.ImageGenerateCheckRequest] | +| /v2/generate/pop | POST | [ImageGenerateJobPopRequest][horde_sdk.ai_horde_api.apimodels.generate.pop.ImageGenerateJobPopRequest] | +| /v2/generate/rate/{id} | POST | [RateRequest][horde_sdk.ai_horde_api.apimodels.generate.rate.RateRequest] | +| /v2/generate/status/{id} | DELETE | [DeleteImageGenerateRequest][horde_sdk.ai_horde_api.apimodels.generate.status.DeleteImageGenerateRequest] | +| /v2/generate/status/{id} | GET | [ImageGenerateStatusRequest][horde_sdk.ai_horde_api.apimodels.generate.status.ImageGenerateStatusRequest] | +| /v2/generate/submit | POST | [ImageGenerationJobSubmitRequest][horde_sdk.ai_horde_api.apimodels.generate.submit.ImageGenerationJobSubmitRequest] | +| /v2/generate/text/async | POST | [TextGenerateAsyncRequest][horde_sdk.ai_horde_api.apimodels.generate.text.async_.TextGenerateAsyncRequest] | +| /v2/generate/text/pop | POST | [TextGenerateJobPopRequest][horde_sdk.ai_horde_api.apimodels.generate.text.pop.TextGenerateJobPopRequest] | +| /v2/generate/text/status/{id} | DELETE | [DeleteTextGenerateRequest][horde_sdk.ai_horde_api.apimodels.generate.text.status.DeleteTextGenerateRequest] | +| /v2/generate/text/status/{id} | GET | [TextGenerateStatusRequest][horde_sdk.ai_horde_api.apimodels.generate.text.status.TextGenerateStatusRequest] | +| /v2/generate/text/submit | POST | [TextGenerationJobSubmitRequest][horde_sdk.ai_horde_api.apimodels.generate.text.submit.TextGenerationJobSubmitRequest] | +| /v2/interrogate/async | POST | [AlchemyAsyncRequest][horde_sdk.ai_horde_api.apimodels.alchemy.async_.AlchemyAsyncRequest] | +| /v2/interrogate/pop | POST | [AlchemyPopRequest][horde_sdk.ai_horde_api.apimodels.alchemy.pop.AlchemyPopRequest] | +| /v2/interrogate/status/{id} | DELETE | [AlchemyDeleteRequest][horde_sdk.ai_horde_api.apimodels.alchemy.status.AlchemyDeleteRequest] | +| /v2/interrogate/status/{id} | GET | [AlchemyStatusRequest][horde_sdk.ai_horde_api.apimodels.alchemy.status.AlchemyStatusRequest] | +| /v2/interrogate/submit | POST | [AlchemyJobSubmitRequest][horde_sdk.ai_horde_api.apimodels.alchemy.submit.AlchemyJobSubmitRequest] | +| /v2/kudos/award | POST | [KudosAwardRequest][horde_sdk.ai_horde_api.apimodels.kudos.KudosAwardRequest] | +| /v2/kudos/transfer | POST | [KudosTransferRequest][horde_sdk.ai_horde_api.apimodels.kudos.KudosTransferRequest] | +| /v2/operations/block_worker_ipaddr/{worker_id} | PUT | [BlockWorkerIPAddressRequest][horde_sdk.ai_horde_api.apimodels.operations.BlockWorkerIPAddressRequest] | +| /v2/operations/block_worker_ipaddr/{worker_id} | DELETE | [DeleteWorkerIPAddressRequest][horde_sdk.ai_horde_api.apimodels.operations.DeleteWorkerIPAddressRequest] | +| /v2/operations/ipaddr | GET | [SingleIPTimeoutsRequest][horde_sdk.ai_horde_api.apimodels.operations.SingleIPTimeoutsRequest] | +| /v2/operations/ipaddr | POST | [BlockIPAddressRequest][horde_sdk.ai_horde_api.apimodels.operations.BlockIPAddressRequest] | +| /v2/operations/ipaddr | DELETE | [DeleteIPAddressRequest][horde_sdk.ai_horde_api.apimodels.operations.DeleteIPAddressRequest] | +| /v2/sharedkeys | 
PUT | [SharedKeyCreateRequest][horde_sdk.ai_horde_api.apimodels.sharedkeys.SharedKeyCreateRequest] | +| /v2/sharedkeys/{sharedkey_id} | DELETE | [SharedKeyDeleteRequest][horde_sdk.ai_horde_api.apimodels.sharedkeys.SharedKeyDeleteRequest] | +| /v2/sharedkeys/{sharedkey_id} | GET | [SharedKeyDetailsRequest][horde_sdk.ai_horde_api.apimodels.sharedkeys.SharedKeyDetailsRequest] | +| /v2/sharedkeys/{sharedkey_id} | PATCH | [SharedKeyModifyRequest][horde_sdk.ai_horde_api.apimodels.sharedkeys.SharedKeyModifyRequest] | +| /v2/stats/img/models | GET | [ImageStatsModelsRequest][horde_sdk.ai_horde_api.apimodels.stats.ImageStatsModelsRequest] | +| /v2/stats/img/totals | GET | [ImageStatsModelsTotalRequest][horde_sdk.ai_horde_api.apimodels.stats.ImageStatsModelsTotalRequest] | +| /v2/stats/text/models | GET | [TextStatsModelsRequest][horde_sdk.ai_horde_api.apimodels.stats.TextStatsModelsRequest] | +| /v2/stats/text/totals | GET | [TextStatsModelsTotalRequest][horde_sdk.ai_horde_api.apimodels.stats.TextStatsModelsTotalRequest] | +| /v2/status/heartbeat | GET | [AIHordeHeartbeatRequest][horde_sdk.ai_horde_api.apimodels.status.AIHordeHeartbeatRequest] | +| /v2/status/models | GET | [HordeStatusModelsAllRequest][horde_sdk.ai_horde_api.apimodels.status.HordeStatusModelsAllRequest] | +| /v2/status/models/{model_name} | GET | [HordeStatusModelsSingleRequest][horde_sdk.ai_horde_api.apimodels.status.HordeStatusModelsSingleRequest] | +| /v2/status/news | GET | [NewsRequest][horde_sdk.ai_horde_api.apimodels.status.NewsRequest] | +| /v2/status/performance | GET | [HordePerformanceRequest][horde_sdk.ai_horde_api.apimodels.status.HordePerformanceRequest] | +| /v2/styles/image | GET | [AllStylesImageRequest][horde_sdk.ai_horde_api.apimodels.styles.AllStylesImageRequest] | +| /v2/styles/image | POST | [CreateStyleImageRequest][horde_sdk.ai_horde_api.apimodels.styles.CreateStyleImageRequest] | +| /v2/styles/image/{style_id} | DELETE | [DeleteStyleImageRequest][horde_sdk.ai_horde_api.apimodels.styles.DeleteStyleImageRequest] | +| /v2/styles/image/{style_id} | PATCH | [ModifyStyleImageRequest][horde_sdk.ai_horde_api.apimodels.styles.ModifyStyleImageRequest] | +| /v2/styles/image/{style_id} | GET | [SingleStyleImageByIDRequest][horde_sdk.ai_horde_api.apimodels.styles.SingleStyleImageByIDRequest] | +| /v2/styles/image/{style_id}/example | POST | [StyleImageExampleAddRequest][horde_sdk.ai_horde_api.apimodels.styles.StyleImageExampleAddRequest] | +| /v2/styles/image/{style_id}/example/{example_id} | DELETE | [StyleImageExampleDeleteRequest][horde_sdk.ai_horde_api.apimodels.styles.StyleImageExampleDeleteRequest] | +| /v2/styles/image/{style_id}/example/{example_id} | PATCH | [StyleImageExampleModifyRequest][horde_sdk.ai_horde_api.apimodels.styles.StyleImageExampleModifyRequest] | +| /v2/styles/image_by_name/{style_name} | GET | [SingleStyleImageByNameRequest][horde_sdk.ai_horde_api.apimodels.styles.SingleStyleImageByNameRequest] | +| /v2/styles/text | GET | [AllStylesTextRequest][horde_sdk.ai_horde_api.apimodels.styles.AllStylesTextRequest] | +| /v2/styles/text | POST | [CreateStyleTextRequest][horde_sdk.ai_horde_api.apimodels.styles.CreateStyleTextRequest] | +| /v2/styles/text/{style_id} | DELETE | [DeleteStyleTextRequest][horde_sdk.ai_horde_api.apimodels.styles.DeleteStyleTextRequest] | +| /v2/styles/text/{style_id} | PATCH | [ModifyStyleTextRequest][horde_sdk.ai_horde_api.apimodels.styles.ModifyStyleTextRequest] | +| /v2/styles/text/{style_id} | GET | 
[SingleStyleTextByIDRequest][horde_sdk.ai_horde_api.apimodels.styles.SingleStyleTextByIDRequest] | +| /v2/styles/text_by_name/{style_name} | GET | [SingleStyleTextByNameRequest][horde_sdk.ai_horde_api.apimodels.styles.SingleStyleTextByNameRequest] | +| /v2/teams | GET | [AllTeamDetailsRequest][horde_sdk.ai_horde_api.apimodels.teams.AllTeamDetailsRequest] | +| /v2/teams | POST | [CreateTeamRequest][horde_sdk.ai_horde_api.apimodels.teams.CreateTeamRequest] | +| /v2/teams/{team_id} | DELETE | [DeleteTeamRequest][horde_sdk.ai_horde_api.apimodels.teams.DeleteTeamRequest] | +| /v2/teams/{team_id} | PATCH | [ModifyTeamRequest][horde_sdk.ai_horde_api.apimodels.teams.ModifyTeamRequest] | +| /v2/teams/{team_id} | GET | [SingleTeamDetailsRequest][horde_sdk.ai_horde_api.apimodels.teams.SingleTeamDetailsRequest] | +| /v2/users | GET | [ListUsersDetailsRequest][horde_sdk.ai_horde_api.apimodels.users.ListUsersDetailsRequest] | +| /v2/users/{user_id} | DELETE | [DeleteUserRequest][horde_sdk.ai_horde_api.apimodels.users.DeleteUserRequest] | +| /v2/users/{user_id} | PUT | [ModifyUserRequest][horde_sdk.ai_horde_api.apimodels.users.ModifyUserRequest] | +| /v2/users/{user_id} | GET | [SingleUserDetailsRequest][horde_sdk.ai_horde_api.apimodels.users.SingleUserDetailsRequest] | +| /v2/workers | GET | [AllWorkersDetailsRequest][horde_sdk.ai_horde_api.apimodels.workers.workers.AllWorkersDetailsRequest] | +| /v2/workers/messages | GET | [AllWorkerMessagesRequest][horde_sdk.ai_horde_api.apimodels.workers.messages.AllWorkerMessagesRequest] | +| /v2/workers/messages | POST | [CreateWorkerMessageRequest][horde_sdk.ai_horde_api.apimodels.workers.messages.CreateWorkerMessageRequest] | +| /v2/workers/messages/{message_id} | DELETE | [DeleteWorkerMessageRequest][horde_sdk.ai_horde_api.apimodels.workers.messages.DeleteWorkerMessageRequest] | +| /v2/workers/messages/{message_id} | GET | [SingleWorkerMessageRequest][horde_sdk.ai_horde_api.apimodels.workers.messages.SingleWorkerMessageRequest] | +| /v2/workers/name/{worker_name} | GET | [SingleWorkerNameDetailsRequest][horde_sdk.ai_horde_api.apimodels.workers.workers.SingleWorkerNameDetailsRequest] | +| /v2/workers/{worker_id} | DELETE | [DeleteWorkerRequest][horde_sdk.ai_horde_api.apimodels.workers.workers.DeleteWorkerRequest] | +| /v2/workers/{worker_id} | PUT | [ModifyWorkerRequest][horde_sdk.ai_horde_api.apimodels.workers.workers.ModifyWorkerRequest] | +| /v2/workers/{worker_id} | GET | [SingleWorkerDetailsRequest][horde_sdk.ai_horde_api.apimodels.workers.workers.SingleWorkerDetailsRequest] | + + +## Responses +| API Endpoint | HTTP Status Code | SDK Response Type | +| ------------ | ----------- | ----------------- | +| /v2/collection_by_name/{collection_name} | 200 | [ResponseModelCollection][horde_sdk.ai_horde_api.apimodels.collections.ResponseModelCollection] | +| /v2/collections | 200 | [CreateCollectionResponse][horde_sdk.ai_horde_api.apimodels.collections.CreateCollectionResponse] | +| /v2/collections/{collection_id} | 200 | [UpdateCollectionResponse][horde_sdk.ai_horde_api.apimodels.collections.UpdateCollectionResponse] | +| /v2/documents/privacy | 200 | [HordeDocument][horde_sdk.ai_horde_api.apimodels.documents.HordeDocument] | +| /v2/documents/sponsors | 200 | [HordeDocument][horde_sdk.ai_horde_api.apimodels.documents.HordeDocument] | +| /v2/documents/terms | 200 | [HordeDocument][horde_sdk.ai_horde_api.apimodels.documents.HordeDocument] | +| /v2/filters | 201 | [FilterDetails][horde_sdk.ai_horde_api.apimodels.filters.FilterDetails] | +| /v2/filters/regex | 200 | 
[FilterRegexResponse][horde_sdk.ai_horde_api.apimodels.filters.FilterRegexResponse] | +| /v2/filters/{filter_id} | 200 | [FilterDetails][horde_sdk.ai_horde_api.apimodels.filters.FilterDetails] | +| /v2/find_user | 200 | [UserDetailsResponse][horde_sdk.ai_horde_api.apimodels.users.UserDetailsResponse] | +| /v2/generate/async | 200 | [ImageGenerateAsyncDryRunResponse][horde_sdk.ai_horde_api.apimodels.generate.async_.ImageGenerateAsyncDryRunResponse] | +| /v2/generate/async | 202 | [ImageGenerateAsyncResponse][horde_sdk.ai_horde_api.apimodels.generate.async_.ImageGenerateAsyncResponse] | +| /v2/generate/check/{id} | 200 | [ImageGenerateCheckResponse][horde_sdk.ai_horde_api.apimodels.generate.check.ImageGenerateCheckResponse] | +| /v2/generate/pop | 200 | [ImageGenerateJobPopResponse][horde_sdk.ai_horde_api.apimodels.generate.pop.ImageGenerateJobPopResponse] | +| /v2/generate/rate/{id} | 200 | [RateResponse][horde_sdk.ai_horde_api.apimodels.generate.rate.RateResponse] | +| /v2/generate/status/{id} | 200 | [ImageGenerateStatusResponse][horde_sdk.ai_horde_api.apimodels.generate.status.ImageGenerateStatusResponse] | +| /v2/generate/submit | 200 | [JobSubmitResponse][horde_sdk.ai_horde_api.apimodels.base.JobSubmitResponse] | +| /v2/generate/text/async | 200 | [TextGenerateAsyncDryRunResponse][horde_sdk.ai_horde_api.apimodels.generate.text.async_.TextGenerateAsyncDryRunResponse] | +| /v2/generate/text/async | 202 | [TextGenerateAsyncResponse][horde_sdk.ai_horde_api.apimodels.generate.text.async_.TextGenerateAsyncResponse] | +| /v2/generate/text/pop | 200 | [TextGenerateJobPopResponse][horde_sdk.ai_horde_api.apimodels.generate.text.pop.TextGenerateJobPopResponse] | +| /v2/generate/text/status/{id} | 200 | [TextGenerateStatusResponse][horde_sdk.ai_horde_api.apimodels.generate.text.status.TextGenerateStatusResponse] | +| /v2/generate/text/submit | 200 | [JobSubmitResponse][horde_sdk.ai_horde_api.apimodels.base.JobSubmitResponse] | +| /v2/interrogate/async | 202 | [AlchemyAsyncResponse][horde_sdk.ai_horde_api.apimodels.alchemy.async_.AlchemyAsyncResponse] | +| /v2/interrogate/pop | 200 | [AlchemyJobPopResponse][horde_sdk.ai_horde_api.apimodels.alchemy.pop.AlchemyJobPopResponse] | +| /v2/interrogate/status/{id} | 200 | [AlchemyStatusResponse][horde_sdk.ai_horde_api.apimodels.alchemy.status.AlchemyStatusResponse] | +| /v2/interrogate/submit | 200 | [AlchemyJobSubmitResponse][horde_sdk.ai_horde_api.apimodels.alchemy.submit.AlchemyJobSubmitResponse] | +| /v2/kudos/award | 200 | [KudosAwardResponse][horde_sdk.ai_horde_api.apimodels.kudos.KudosAwardResponse] | +| /v2/kudos/transfer | 200 | [KudosTransferResponse][horde_sdk.ai_horde_api.apimodels.kudos.KudosTransferResponse] | +| /v2/operations/block_worker_ipaddr/{worker_id} | 200 | [DeleteWorkerIPAddressResponse][horde_sdk.ai_horde_api.apimodels.operations.DeleteWorkerIPAddressResponse] | +| /v2/operations/ipaddr | 200 | [IPTimeoutListResponse][horde_sdk.ai_horde_api.apimodels.operations.IPTimeoutListResponse] | +| /v2/sharedkeys | 200 | [ExpiryStrSharedKeyDetailsResponse][horde_sdk.ai_horde_api.apimodels.sharedkeys.ExpiryStrSharedKeyDetailsResponse] | +| /v2/sharedkeys/{sharedkey_id} | 200 | [ExpiryStrSharedKeyDetailsResponse][horde_sdk.ai_horde_api.apimodels.sharedkeys.ExpiryStrSharedKeyDetailsResponse] | +| /v2/stats/img/models | 200 | [ImageStatsModelsResponse][horde_sdk.ai_horde_api.apimodels.stats.ImageStatsModelsResponse] | +| /v2/stats/img/totals | 200 | 
[ImageStatsModelsTotalResponse][horde_sdk.ai_horde_api.apimodels.stats.ImageStatsModelsTotalResponse] | +| /v2/stats/text/models | 200 | [TextStatsModelResponse][horde_sdk.ai_horde_api.apimodels.stats.TextStatsModelResponse] | +| /v2/stats/text/totals | 200 | [TextStatsModelsTotalResponse][horde_sdk.ai_horde_api.apimodels.stats.TextStatsModelsTotalResponse] | +| /v2/status/heartbeat | 200 | [AIHordeHeartbeatResponse][horde_sdk.ai_horde_api.apimodels.status.AIHordeHeartbeatResponse] | +| /v2/status/models | 200 | [HordeStatusModelsAllResponse][horde_sdk.ai_horde_api.apimodels.status.HordeStatusModelsAllResponse] | +| /v2/status/models/{model_name} | 200 | [HordeStatusModelsSingleResponse][horde_sdk.ai_horde_api.apimodels.status.HordeStatusModelsSingleResponse] | +| /v2/status/news | 200 | [NewsResponse][horde_sdk.ai_horde_api.apimodels.status.NewsResponse] | +| /v2/status/performance | 200 | [HordePerformanceResponse][horde_sdk.ai_horde_api.apimodels.status.HordePerformanceResponse] | +| /v2/styles/image | 200 | [ModifyStyleImageResponse][horde_sdk.ai_horde_api.apimodels.styles.ModifyStyleImageResponse] | +| /v2/styles/image/{style_id} | 200 | [StyleStable][horde_sdk.ai_horde_api.apimodels.styles.StyleStable] | +| /v2/styles/image/{style_id}/example | 200 | [StyleImageExampleModifyResponse][horde_sdk.ai_horde_api.apimodels.styles.StyleImageExampleModifyResponse] | +| /v2/styles/image/{style_id}/example/{example_id} | 200 | [StyleImageExampleModifyResponse][horde_sdk.ai_horde_api.apimodels.styles.StyleImageExampleModifyResponse] | +| /v2/styles/image_by_name/{style_name} | 200 | [StyleStable][horde_sdk.ai_horde_api.apimodels.styles.StyleStable] | +| /v2/styles/text | 200 | [ModifyStyleTextResponse][horde_sdk.ai_horde_api.apimodels.styles.ModifyStyleTextResponse] | +| /v2/styles/text/{style_id} | 200 | [StyleKobold][horde_sdk.ai_horde_api.apimodels.styles.StyleKobold] | +| /v2/styles/text_by_name/{style_name} | 200 | [StyleKobold][horde_sdk.ai_horde_api.apimodels.styles.StyleKobold] | +| /v2/teams | 200 | [ModifyTeam][horde_sdk.ai_horde_api.apimodels.teams.ModifyTeam] | +| /v2/teams/{team_id} | 200 | [TeamDetails][horde_sdk.ai_horde_api.apimodels.teams.TeamDetails] | +| /v2/users | 200 | [ListUsersDetailsResponse][horde_sdk.ai_horde_api.apimodels.users.ListUsersDetailsResponse] | +| /v2/users/{user_id} | 200 | [UserDetailsResponse][horde_sdk.ai_horde_api.apimodels.users.UserDetailsResponse] | +| /v2/workers | 200 | [AllWorkersDetailsResponse][horde_sdk.ai_horde_api.apimodels.workers.workers.AllWorkersDetailsResponse] | +| /v2/workers/messages | 200 | [ResponseModelMessage][horde_sdk.ai_horde_api.apimodels.workers.messages.ResponseModelMessage] | +| /v2/workers/messages/{message_id} | 200 | [ResponseModelMessage][horde_sdk.ai_horde_api.apimodels.workers.messages.ResponseModelMessage] | +| /v2/workers/name/{worker_name} | 200 | [SingleWorkerDetailsResponse][horde_sdk.ai_horde_api.apimodels.workers.workers.SingleWorkerDetailsResponse] | +| /v2/workers/{worker_id} | 200 | [SingleWorkerDetailsResponse][horde_sdk.ai_horde_api.apimodels.workers.workers.SingleWorkerDetailsResponse] | diff --git a/docs/ai-horde/api_to_sdk_payload_map.json b/docs/ai-horde/api_to_sdk_payload_map.json new file mode 100644 index 00000000..0a910e88 --- /dev/null +++ b/docs/ai-horde/api_to_sdk_payload_map.json @@ -0,0 +1,201 @@ +{ + "/v2/documents/privacy": { + "GET": "horde_sdk.ai_horde_api.apimodels.documents.AIHordeGetPrivacyPolicyRequest" + }, + "/v2/documents/sponsors": { + "GET": 
"horde_sdk.ai_horde_api.apimodels.documents.AIHordeGetSponsorsRequest" + }, + "/v2/documents/terms": { + "GET": "horde_sdk.ai_horde_api.apimodels.documents.AIHordeGetTermsRequest" + }, + "/v2/status/heartbeat": { + "GET": "horde_sdk.ai_horde_api.apimodels.status.AIHordeHeartbeatRequest" + }, + "/v2/interrogate/async": { + "POST": "horde_sdk.ai_horde_api.apimodels.alchemy.async_.AlchemyAsyncRequest" + }, + "/v2/interrogate/status/{id}": { + "DELETE": "horde_sdk.ai_horde_api.apimodels.alchemy.status.AlchemyDeleteRequest", + "GET": "horde_sdk.ai_horde_api.apimodels.alchemy.status.AlchemyStatusRequest" + }, + "/v2/interrogate/submit": { + "POST": "horde_sdk.ai_horde_api.apimodels.alchemy.submit.AlchemyJobSubmitRequest" + }, + "/v2/interrogate/pop": { + "POST": "horde_sdk.ai_horde_api.apimodels.alchemy.pop.AlchemyPopRequest" + }, + "/v2/collections": { + "GET": "horde_sdk.ai_horde_api.apimodels.collections.AllCollectionsRequest", + "POST": "horde_sdk.ai_horde_api.apimodels.collections.CreateCollectionRequest" + }, + "/v2/operations/ipaddr": { + "GET": "horde_sdk.ai_horde_api.apimodels.operations.SingleIPTimeoutsRequest", + "POST": "horde_sdk.ai_horde_api.apimodels.operations.BlockIPAddressRequest", + "DELETE": "horde_sdk.ai_horde_api.apimodels.operations.DeleteIPAddressRequest" + }, + "/v2/styles/image": { + "GET": "horde_sdk.ai_horde_api.apimodels.styles.AllStylesImageRequest", + "POST": "horde_sdk.ai_horde_api.apimodels.styles.CreateStyleImageRequest" + }, + "/v2/styles/text": { + "GET": "horde_sdk.ai_horde_api.apimodels.styles.AllStylesTextRequest", + "POST": "horde_sdk.ai_horde_api.apimodels.styles.CreateStyleTextRequest" + }, + "/v2/teams": { + "GET": "horde_sdk.ai_horde_api.apimodels.teams.AllTeamDetailsRequest", + "POST": "horde_sdk.ai_horde_api.apimodels.teams.CreateTeamRequest" + }, + "/v2/workers/messages": { + "GET": "horde_sdk.ai_horde_api.apimodels.workers.messages.AllWorkerMessagesRequest", + "POST": "horde_sdk.ai_horde_api.apimodels.workers.messages.CreateWorkerMessageRequest" + }, + "/v2/workers": { + "GET": "horde_sdk.ai_horde_api.apimodels.workers.workers.AllWorkersDetailsRequest" + }, + "/v2/operations/block_worker_ipaddr/{worker_id}": { + "PUT": "horde_sdk.ai_horde_api.apimodels.operations.BlockWorkerIPAddressRequest", + "DELETE": "horde_sdk.ai_horde_api.apimodels.operations.DeleteWorkerIPAddressRequest" + }, + "/v2/collections/{collection_id}": { + "GET": "horde_sdk.ai_horde_api.apimodels.collections.CollectionByIDRequest", + "DELETE": "horde_sdk.ai_horde_api.apimodels.collections.DeleteCollectionRequest", + "PATCH": "horde_sdk.ai_horde_api.apimodels.collections.UpdateCollectionRequest" + }, + "/v2/collection_by_name/{collection_name}": { + "GET": "horde_sdk.ai_horde_api.apimodels.collections.CollectionByNameRequest" + }, + "/v2/filters/{filter_id}": { + "DELETE": "horde_sdk.ai_horde_api.apimodels.filters.DeleteFilterRequest", + "PATCH": "horde_sdk.ai_horde_api.apimodels.filters.PatchExistingFilter", + "GET": "horde_sdk.ai_horde_api.apimodels.filters.SingleFilterRequest" + }, + "/v2/generate/status/{id}": { + "DELETE": "horde_sdk.ai_horde_api.apimodels.generate.status.DeleteImageGenerateRequest", + "GET": "horde_sdk.ai_horde_api.apimodels.generate.status.ImageGenerateStatusRequest" + }, + "/v2/styles/image/{style_id}": { + "DELETE": "horde_sdk.ai_horde_api.apimodels.styles.DeleteStyleImageRequest", + "PATCH": "horde_sdk.ai_horde_api.apimodels.styles.ModifyStyleImageRequest", + "GET": "horde_sdk.ai_horde_api.apimodels.styles.SingleStyleImageByIDRequest" + }, + 
"/v2/styles/text/{style_id}": { + "DELETE": "horde_sdk.ai_horde_api.apimodels.styles.DeleteStyleTextRequest", + "PATCH": "horde_sdk.ai_horde_api.apimodels.styles.ModifyStyleTextRequest", + "GET": "horde_sdk.ai_horde_api.apimodels.styles.SingleStyleTextByIDRequest" + }, + "/v2/teams/{team_id}": { + "DELETE": "horde_sdk.ai_horde_api.apimodels.teams.DeleteTeamRequest", + "PATCH": "horde_sdk.ai_horde_api.apimodels.teams.ModifyTeamRequest", + "GET": "horde_sdk.ai_horde_api.apimodels.teams.SingleTeamDetailsRequest" + }, + "/v2/generate/text/status/{id}": { + "DELETE": "horde_sdk.ai_horde_api.apimodels.generate.text.status.DeleteTextGenerateRequest", + "GET": "horde_sdk.ai_horde_api.apimodels.generate.text.status.TextGenerateStatusRequest" + }, + "/v2/users/{user_id}": { + "DELETE": "horde_sdk.ai_horde_api.apimodels.users.DeleteUserRequest", + "PUT": "horde_sdk.ai_horde_api.apimodels.users.ModifyUserRequest", + "GET": "horde_sdk.ai_horde_api.apimodels.users.SingleUserDetailsRequest" + }, + "/v2/workers/messages/{message_id}": { + "DELETE": "horde_sdk.ai_horde_api.apimodels.workers.messages.DeleteWorkerMessageRequest", + "GET": "horde_sdk.ai_horde_api.apimodels.workers.messages.SingleWorkerMessageRequest" + }, + "/v2/workers/{worker_id}": { + "DELETE": "horde_sdk.ai_horde_api.apimodels.workers.workers.DeleteWorkerRequest", + "PUT": "horde_sdk.ai_horde_api.apimodels.workers.workers.ModifyWorkerRequest", + "GET": "horde_sdk.ai_horde_api.apimodels.workers.workers.SingleWorkerDetailsRequest" + }, + "/v2/filters": { + "POST": "horde_sdk.ai_horde_api.apimodels.filters.FilterPromptSuspicionRequest", + "GET": "horde_sdk.ai_horde_api.apimodels.filters.FiltersListRequest", + "PUT": "horde_sdk.ai_horde_api.apimodels.filters.PutNewFilterRequest" + }, + "/v2/filters/regex": { + "GET": "horde_sdk.ai_horde_api.apimodels.filters.FilterRegexRequest" + }, + "/v2/find_user": { + "GET": "horde_sdk.ai_horde_api.apimodels.find_user.FindUserRequest" + }, + "/v2/status/performance": { + "GET": "horde_sdk.ai_horde_api.apimodels.status.HordePerformanceRequest" + }, + "/v2/status/models": { + "GET": "horde_sdk.ai_horde_api.apimodels.status.HordeStatusModelsAllRequest" + }, + "/v2/status/models/{model_name}": { + "GET": "horde_sdk.ai_horde_api.apimodels.status.HordeStatusModelsSingleRequest" + }, + "/v2/generate/async": { + "POST": "horde_sdk.ai_horde_api.apimodels.generate.async_.ImageGenerateAsyncRequest" + }, + "/v2/generate/check/{id}": { + "GET": "horde_sdk.ai_horde_api.apimodels.generate.check.ImageGenerateCheckRequest" + }, + "/v2/generate/pop": { + "POST": "horde_sdk.ai_horde_api.apimodels.generate.pop.ImageGenerateJobPopRequest" + }, + "/v2/generate/submit": { + "POST": "horde_sdk.ai_horde_api.apimodels.generate.submit.ImageGenerationJobSubmitRequest" + }, + "/v2/stats/img/models": { + "GET": "horde_sdk.ai_horde_api.apimodels.stats.ImageStatsModelsRequest" + }, + "/v2/stats/img/totals": { + "GET": "horde_sdk.ai_horde_api.apimodels.stats.ImageStatsModelsTotalRequest" + }, + "/v2/kudos/award": { + "POST": "horde_sdk.ai_horde_api.apimodels.kudos.KudosAwardRequest" + }, + "/v2/kudos/transfer": { + "POST": "horde_sdk.ai_horde_api.apimodels.kudos.KudosTransferRequest" + }, + "/v2/users": { + "GET": "horde_sdk.ai_horde_api.apimodels.users.ListUsersDetailsRequest" + }, + "/v2/status/news": { + "GET": "horde_sdk.ai_horde_api.apimodels.status.NewsRequest" + }, + "/v2/generate/rate/{id}": { + "POST": "horde_sdk.ai_horde_api.apimodels.generate.rate.RateRequest" + }, + "/v2/sharedkeys": { + "PUT": 
"horde_sdk.ai_horde_api.apimodels.sharedkeys.SharedKeyCreateRequest" + }, + "/v2/sharedkeys/{sharedkey_id}": { + "DELETE": "horde_sdk.ai_horde_api.apimodels.sharedkeys.SharedKeyDeleteRequest", + "GET": "horde_sdk.ai_horde_api.apimodels.sharedkeys.SharedKeyDetailsRequest", + "PATCH": "horde_sdk.ai_horde_api.apimodels.sharedkeys.SharedKeyModifyRequest" + }, + "/v2/styles/image_by_name/{style_name}": { + "GET": "horde_sdk.ai_horde_api.apimodels.styles.SingleStyleImageByNameRequest" + }, + "/v2/styles/text_by_name/{style_name}": { + "GET": "horde_sdk.ai_horde_api.apimodels.styles.SingleStyleTextByNameRequest" + }, + "/v2/workers/name/{worker_name}": { + "GET": "horde_sdk.ai_horde_api.apimodels.workers.workers.SingleWorkerNameDetailsRequest" + }, + "/v2/styles/image/{style_id}/example": { + "POST": "horde_sdk.ai_horde_api.apimodels.styles.StyleImageExampleAddRequest" + }, + "/v2/styles/image/{style_id}/example/{example_id}": { + "DELETE": "horde_sdk.ai_horde_api.apimodels.styles.StyleImageExampleDeleteRequest", + "PATCH": "horde_sdk.ai_horde_api.apimodels.styles.StyleImageExampleModifyRequest" + }, + "/v2/generate/text/async": { + "POST": "horde_sdk.ai_horde_api.apimodels.generate.text.async_.TextGenerateAsyncRequest" + }, + "/v2/generate/text/pop": { + "POST": "horde_sdk.ai_horde_api.apimodels.generate.text.pop.TextGenerateJobPopRequest" + }, + "/v2/generate/text/submit": { + "POST": "horde_sdk.ai_horde_api.apimodels.generate.text.submit.TextGenerationJobSubmitRequest" + }, + "/v2/stats/text/models": { + "GET": "horde_sdk.ai_horde_api.apimodels.stats.TextStatsModelsRequest" + }, + "/v2/stats/text/totals": { + "GET": "horde_sdk.ai_horde_api.apimodels.stats.TextStatsModelsTotalRequest" + } +} diff --git a/docs/ai-horde/api_to_sdk_response_map.json b/docs/ai-horde/api_to_sdk_response_map.json new file mode 100644 index 00000000..347fc800 --- /dev/null +++ b/docs/ai-horde/api_to_sdk_response_map.json @@ -0,0 +1,172 @@ +{ + "/v2/documents/privacy": { + "200": "horde_sdk.ai_horde_api.apimodels.documents.HordeDocument" + }, + "/v2/documents/sponsors": { + "200": "horde_sdk.ai_horde_api.apimodels.documents.HordeDocument" + }, + "/v2/documents/terms": { + "200": "horde_sdk.ai_horde_api.apimodels.documents.HordeDocument" + }, + "/v2/status/heartbeat": { + "200": "horde_sdk.ai_horde_api.apimodels.status.AIHordeHeartbeatResponse" + }, + "/v2/interrogate/async": { + "202": "horde_sdk.ai_horde_api.apimodels.alchemy.async_.AlchemyAsyncResponse" + }, + "/v2/interrogate/status/{id}": { + "200": "horde_sdk.ai_horde_api.apimodels.alchemy.status.AlchemyStatusResponse" + }, + "/v2/interrogate/submit": { + "200": "horde_sdk.ai_horde_api.apimodels.alchemy.submit.AlchemyJobSubmitResponse" + }, + "/v2/interrogate/pop": { + "200": "horde_sdk.ai_horde_api.apimodels.alchemy.pop.AlchemyJobPopResponse" + }, + "/v2/collections": { + "200": "horde_sdk.ai_horde_api.apimodels.collections.CreateCollectionResponse" + }, + "/v2/operations/ipaddr": { + "200": "horde_sdk.ai_horde_api.apimodels.operations.IPTimeoutListResponse" + }, + "/v2/styles/image": { + "200": "horde_sdk.ai_horde_api.apimodels.styles.ModifyStyleImageResponse" + }, + "/v2/styles/text": { + "200": "horde_sdk.ai_horde_api.apimodels.styles.ModifyStyleTextResponse" + }, + "/v2/teams": { + "200": "horde_sdk.ai_horde_api.apimodels.teams.ModifyTeam" + }, + "/v2/workers/messages": { + "200": "horde_sdk.ai_horde_api.apimodels.workers.messages.ResponseModelMessage" + }, + "/v2/workers": { + "200": 
"horde_sdk.ai_horde_api.apimodels.workers.workers.AllWorkersDetailsResponse" + }, + "/v2/operations/block_worker_ipaddr/{worker_id}": { + "200": "horde_sdk.ai_horde_api.apimodels.operations.DeleteWorkerIPAddressResponse" + }, + "/v2/collections/{collection_id}": { + "200": "horde_sdk.ai_horde_api.apimodels.collections.UpdateCollectionResponse" + }, + "/v2/collection_by_name/{collection_name}": { + "200": "horde_sdk.ai_horde_api.apimodels.collections.ResponseModelCollection" + }, + "/v2/filters/{filter_id}": { + "200": "horde_sdk.ai_horde_api.apimodels.filters.FilterDetails" + }, + "/v2/generate/status/{id}": { + "200": "horde_sdk.ai_horde_api.apimodels.generate.status.ImageGenerateStatusResponse" + }, + "/v2/styles/image/{style_id}": { + "200": "horde_sdk.ai_horde_api.apimodels.styles.StyleStable" + }, + "/v2/styles/text/{style_id}": { + "200": "horde_sdk.ai_horde_api.apimodels.styles.StyleKobold" + }, + "/v2/teams/{team_id}": { + "200": "horde_sdk.ai_horde_api.apimodels.teams.TeamDetails" + }, + "/v2/generate/text/status/{id}": { + "200": "horde_sdk.ai_horde_api.apimodels.generate.text.status.TextGenerateStatusResponse" + }, + "/v2/users/{user_id}": { + "200": "horde_sdk.ai_horde_api.apimodels.users.UserDetailsResponse" + }, + "/v2/workers/messages/{message_id}": { + "200": "horde_sdk.ai_horde_api.apimodels.workers.messages.ResponseModelMessage" + }, + "/v2/workers/{worker_id}": { + "200": "horde_sdk.ai_horde_api.apimodels.workers.workers.SingleWorkerDetailsResponse" + }, + "/v2/filters": { + "201": "horde_sdk.ai_horde_api.apimodels.filters.FilterDetails" + }, + "/v2/filters/regex": { + "200": "horde_sdk.ai_horde_api.apimodels.filters.FilterRegexResponse" + }, + "/v2/find_user": { + "200": "horde_sdk.ai_horde_api.apimodels.users.UserDetailsResponse" + }, + "/v2/status/performance": { + "200": "horde_sdk.ai_horde_api.apimodels.status.HordePerformanceResponse" + }, + "/v2/status/models": { + "200": "horde_sdk.ai_horde_api.apimodels.status.HordeStatusModelsAllResponse" + }, + "/v2/status/models/{model_name}": { + "200": "horde_sdk.ai_horde_api.apimodels.status.HordeStatusModelsSingleResponse" + }, + "/v2/generate/async": { + "200": "horde_sdk.ai_horde_api.apimodels.generate.async_.ImageGenerateAsyncDryRunResponse", + "202": "horde_sdk.ai_horde_api.apimodels.generate.async_.ImageGenerateAsyncResponse" + }, + "/v2/generate/check/{id}": { + "200": "horde_sdk.ai_horde_api.apimodels.generate.check.ImageGenerateCheckResponse" + }, + "/v2/generate/pop": { + "200": "horde_sdk.ai_horde_api.apimodels.generate.pop.ImageGenerateJobPopResponse" + }, + "/v2/generate/submit": { + "200": "horde_sdk.ai_horde_api.apimodels.base.JobSubmitResponse" + }, + "/v2/stats/img/models": { + "200": "horde_sdk.ai_horde_api.apimodels.stats.ImageStatsModelsResponse" + }, + "/v2/stats/img/totals": { + "200": "horde_sdk.ai_horde_api.apimodels.stats.ImageStatsModelsTotalResponse" + }, + "/v2/kudos/award": { + "200": "horde_sdk.ai_horde_api.apimodels.kudos.KudosAwardResponse" + }, + "/v2/kudos/transfer": { + "200": "horde_sdk.ai_horde_api.apimodels.kudos.KudosTransferResponse" + }, + "/v2/users": { + "200": "horde_sdk.ai_horde_api.apimodels.users.ListUsersDetailsResponse" + }, + "/v2/status/news": { + "200": "horde_sdk.ai_horde_api.apimodels.status.NewsResponse" + }, + "/v2/generate/rate/{id}": { + "200": "horde_sdk.ai_horde_api.apimodels.generate.rate.RateResponse" + }, + "/v2/sharedkeys": { + "200": "horde_sdk.ai_horde_api.apimodels.sharedkeys.ExpiryStrSharedKeyDetailsResponse" + }, + "/v2/sharedkeys/{sharedkey_id}": { + 
"200": "horde_sdk.ai_horde_api.apimodels.sharedkeys.ExpiryStrSharedKeyDetailsResponse" + }, + "/v2/styles/image_by_name/{style_name}": { + "200": "horde_sdk.ai_horde_api.apimodels.styles.StyleStable" + }, + "/v2/styles/text_by_name/{style_name}": { + "200": "horde_sdk.ai_horde_api.apimodels.styles.StyleKobold" + }, + "/v2/workers/name/{worker_name}": { + "200": "horde_sdk.ai_horde_api.apimodels.workers.workers.SingleWorkerDetailsResponse" + }, + "/v2/styles/image/{style_id}/example": { + "200": "horde_sdk.ai_horde_api.apimodels.styles.StyleImageExampleModifyResponse" + }, + "/v2/styles/image/{style_id}/example/{example_id}": { + "200": "horde_sdk.ai_horde_api.apimodels.styles.StyleImageExampleModifyResponse" + }, + "/v2/generate/text/async": { + "200": "horde_sdk.ai_horde_api.apimodels.generate.text.async_.TextGenerateAsyncDryRunResponse", + "202": "horde_sdk.ai_horde_api.apimodels.generate.text.async_.TextGenerateAsyncResponse" + }, + "/v2/generate/text/pop": { + "200": "horde_sdk.ai_horde_api.apimodels.generate.text.pop.TextGenerateJobPopResponse" + }, + "/v2/generate/text/submit": { + "200": "horde_sdk.ai_horde_api.apimodels.base.JobSubmitResponse" + }, + "/v2/stats/text/models": { + "200": "horde_sdk.ai_horde_api.apimodels.stats.TextStatsModelResponse" + }, + "/v2/stats/text/totals": { + "200": "horde_sdk.ai_horde_api.apimodels.stats.TextStatsModelsTotalResponse" + } +} diff --git a/docs/ai-horde/request_field_names_and_descriptions.json b/docs/ai-horde/request_field_names_and_descriptions.json new file mode 100644 index 00000000..507798fa --- /dev/null +++ b/docs/ai-horde/request_field_names_and_descriptions.json @@ -0,0 +1,3421 @@ +{ + "AIHordeGetPrivacyPolicyRequest": { + "format": { + "description": "The format of the document to return. Default is markdown.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.documents.DocumentFormat", + "str" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "AIHordeGetSponsorsRequest": { + "format": { + "description": "The format of the document to return. Default is markdown.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.documents.DocumentFormat", + "str" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "AIHordeGetTermsRequest": { + "format": { + "description": "The format of the document to return. Default is markdown.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.documents.DocumentFormat", + "str" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "AIHordeHeartbeatRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "AlchemyAsyncRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "forms": { + "description": "The list of forms (types of post-processing/interrogation/captioning/etc) to request.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.alchemy.async_.AlchemyAsyncRequestFormItem]" + ] + }, + "source_image": { + "description": "The public URL of the source image or a base64 string to use.", + "types": [ + "str" + ] + }, + "slow_workers": { + "description": "Whether to use the slower workers. Costs additional kudos if `False`.", + "types": [ + "bool" + ] + }, + "extra_slow_workers": { + "description": "Whether to use the super slow workers.", + "types": [ + "bool" + ] + }, + "webhook": { + "description": "Provide a URL where the AI Horde will send a POST call after each delivered generation. The request will\ninclude the details of the job as well as the request ID.", + "types": [ + "str", + "None" + ] + } + }, + "AlchemyDeleteRequest": { + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.GenerationID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "AlchemyJobSubmitRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.GenerationID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "result": { + "description": "The result of the alchemy job.", + "types": [ + "str" + ] + }, + "state": { + "description": "The state of this generation. See `GENERATION_STATE` for more information.", + "types": [ + "horde_sdk.ai_horde_api.consts.GENERATION_STATE" + ] + } + }, + "AlchemyPopRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. 
See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "bridge_agent": { + "description": "The agent string for the bridge, in the format `name:version:url or contact info`.", + "types": [ + "str" + ] + }, + "name": { + "description": "The name of the request. This is used to identify the request in the logs.", + "types": [ + "str" + ] + }, + "priority_usernames": { + "description": "The usernames that should be prioritized for this request.", + "types": [ + "list[str]" + ] + }, + "forms": { + "description": "The types of alchemy that should be generated.", + "types": [ + "list[horde_sdk.generation_parameters.alchemy.consts.KNOWN_ALCHEMY_TYPES]" + ] + }, + "amount": { + "description": "The number of jobs to request.", + "types": [ + "int" + ] + }, + "threads": { + "description": "The number of threads to report that this bridge is using to the API.", + "types": [ + "int" + ] + }, + "max_tiles": { + "description": "The maximum number of 512x512 tiles that this worker can process.", + "types": [ + "int" + ] + } + }, + "AlchemyStatusRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.GenerationID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "AllCollectionsRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "sort": { + "description": "The sort order for the collections.", + "types": [ + "Literal" + ] + }, + "page": { + "description": "The page number for the collections. Each page has 25 styles.", + "types": [ + "int" + ] + }, + "type_": { + "description": "The type of collections to retrieve.", + "types": [ + "Literal" + ] + } + }, + "AllIPTimeoutsRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "AllStylesImageRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "sort": { + "description": "The sort order of the styles.", + "types": [ + "Literal" + ] + }, + "page": { + "description": "The page of styles to retrieve. Each page has 25 styles.", + "types": [ + "int" + ] + }, + "tag": { + "description": "If specified, return only styles with this tag.", + "types": [ + "str", + "None" + ] + }, + "model": { + "description": "If specified, return only styles which use this model.", + "types": [ + "str", + "None" + ] + } + }, + "AllStylesTextRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "sort": { + "description": "The sort order of the styles.", + "types": [ + "Literal" + ] + }, + "page": { + "description": "The page of styles to retrieve. Each page has 25 styles.", + "types": [ + "int" + ] + }, + "tag": { + "description": "If specified, return only styles with this tag.", + "types": [ + "str", + "None" + ] + }, + "model": { + "description": "If specified, return only styles which use this model.", + "types": [ + "str", + "None" + ] + } + }, + "AllTeamDetailsRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "AllWorkerMessagesRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "user_id": { + "description": "The ID of the user to retrieve messages for. If not specified, all messages will be retrieved.", + "types": [ + "str", + "None" + ] + }, + "worker_id": { + "description": "The ID of the worker to retrieve messages for. If not specified, all messages will be retrieved.", + "types": [ + "str", + "None" + ] + }, + "validity": { + "description": "The validity of the messages to retrieve.", + "types": [ + "Literal" + ] + }, + "page": { + "description": "The page of messages to retrieve. Each page has 50 messages.", + "types": [ + "int" + ] + } + }, + "AllWorkersDetailsRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "type_": { + "description": "Filter workers by type. 
Default is 'all' which returns all workers.", + "types": [ + "horde_sdk.ai_horde_api.consts.AI_HORDE_WORKER_TYPES" + ] + }, + "name": { + "description": "Returns a worker matching the exact name provided. Case insensitive.", + "types": [ + "str", + "None" + ] + } + }, + "BlockIPAddressRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "ipaddr": { + "description": "The IP address of the user to block.", + "types": [ + "str" + ] + }, + "hours": { + "description": "The number of hours to block the IP address for.", + "types": [ + "int" + ] + } + }, + "BlockWorkerIPAddressRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "worker_id": { + "description": "The ID of the worker to block.", + "types": [ + "str" + ] + }, + "days": { + "description": "The number of days to block the worker IP address for.", + "types": [ + "int" + ] + } + }, + "CollectionByIDRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "collection_id": { + "description": "The ID of the collection.", + "types": [ + "str" + ] + } + }, + "CollectionByNameRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "collection_name": { + "description": "The name of the collection.", + "types": [ + "str" + ] + } + }, + "CreateCollectionRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "name": { + "description": "The name for the collection. 
Case-sensitive and unique per user.", + "types": [ + "str" + ] + }, + "info": { + "description": "Extra information about this collection.", + "types": [ + "str", + "None" + ] + }, + "public": { + "description": "When true this collection will be listed among all collections publicly.When false, information about this\ncollection can only be seen by people who know its ID or name.", + "types": [ + "bool" + ] + }, + "styles": { + "description": "The styles to use in this collection.", + "types": [ + "list[str]" + ] + } + }, + "CreateStyleImageRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "name": { + "description": "The name of the style.", + "types": [ + "str" + ] + }, + "info": { + "description": "Extra information or comments about this style provided by its creator.", + "types": [ + "str", + "None" + ] + }, + "prompt": { + "description": "The prompt template which will be sent to generate an image.\n\nThe user's prompt will be injected into this. This argument MUST include a '{p}' which specifies the part where the user's prompt will be injected and an '{np}' where the user's negative prompt will be injected (if any)", + "types": [ + "str" + ] + }, + "public": { + "description": "When true this style will be listed among all styles publicly.\n\nWhen false, information about this style can only be seen by people who know its ID or name.", + "types": [ + "bool" + ] + }, + "nsfw": { + "description": "When true, it signified this style is expected to generate NSFW images primarily.", + "types": [ + "bool" + ] + }, + "tags": { + "description": "Tags associated with this style.", + "types": [ + "list[str]", + "None" + ] + }, + "models": { + "description": "The models which this style will attempt to use.", + "types": [ + "list[str]", + "None" + ] + }, + "params": { + "description": "The parameters to use for all generations using this style, if not set by the user.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.styles.ModelStyleInputParamsStable" + ] + }, + "sharedkey": { + "description": "The UUID of a shared key which will be used to fulfil this style when active.", + "types": [ + "str", + "None" + ] + } + }, + "CreateStyleTextRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "name": { + "description": "The name of the style.", + "types": [ + "str" + ] + }, + "info": { + "description": "Extra information or comments about this style provided by its creator.", + "types": [ + "str", + "None" + ] + }, + "prompt": { + "description": "The prompt template which will be sent to generate an image.\n\nThe user's prompt will be injected into this. 
This argument MUST include a '{p}' which specifies the part where the user's prompt will be injected and an '{np}' where the user's negative prompt will be injected (if any)", + "types": [ + "str" + ] + }, + "public": { + "description": "When true this style will be listed among all styles publicly.\n\nWhen false, information about this style can only be seen by people who know its ID or name.", + "types": [ + "bool" + ] + }, + "nsfw": { + "description": "When true, it signified this style is expected to generate NSFW images primarily.", + "types": [ + "bool" + ] + }, + "tags": { + "description": "Tags associated with this style.", + "types": [ + "list[str]", + "None" + ] + }, + "models": { + "description": "The models which this style will attempt to use.", + "types": [ + "list[str]", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "params": { + "description": "The parameters to use for all generations using this style, if not set by the user.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.styles.ModelStyleInputParamsKobold" + ] + } + }, + "CreateTeamRequest": { + "name": { + "description": "The name of the team.", + "types": [ + "str" + ] + }, + "info": { + "description": "Extra information or comments about this team provided by its owner.", + "types": [ + "str", + "None" + ] + }, + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "CreateWorkerMessageRequest": { + "worker_id": { + "description": "The ID of the worker that the message is for.", + "types": [ + "str", + "None" + ] + }, + "message": { + "description": "The message.", + "types": [ + "str" + ] + }, + "origin": { + "description": "The origin of the message.", + "types": [ + "str", + "None" + ] + }, + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "expiry": { + "description": "The number of hours after which this message expires.", + "types": [ + "int" + ] + } + }, + "DeleteCollectionRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "collection_id": { + "description": "The ID of the collection.", + "types": [ + "str" + ] + } + }, + "DeleteFilterRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "filter_id": { + "description": "The ID of the filter to delete.", + "types": [ + "str" + ] + } + }, + "DeleteIPAddressRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "ipaddr": { + "description": "The IP address of the user to unblock.", + "types": [ + "str" + ] + } + }, + "DeleteImageGenerateRequest": { + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.GenerationID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "DeleteStyleImageRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "style_id": { + "description": "The ID of the style to delete.", + "types": [ + "str" + ] + } + }, + "DeleteStyleTextRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "style_id": { + "description": "The ID of the style to delete.", + "types": [ + "str" + ] + } + }, + "DeleteTeamRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "team_id": { + "description": "The ID of the team to delete.", + "types": [ + "str" + ] + } + }, + "DeleteTextGenerateRequest": { + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.GenerationID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "DeleteUserRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "user_id": { + "description": "The user's ID, as a `str`, but only containing numeric values.", + "types": [ + "str" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "DeleteWorkerIPAddressRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "worker_id": { + "description": "The ID of the worker to unblock.", + "types": [ + "str" + ] + } + }, + "DeleteWorkerMessageRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "message_id": { + "description": "The ID of the message to delete.", + "types": [ + "str" + ] + } + }, + "DeleteWorkerRequest": { + "worker_id": { + "description": "The UUID of the worker in question for this request.", + "types": [ + "str", + "horde_sdk.ai_horde_api.fields.WorkerID" + ] + }, + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "FilterPromptSuspicionRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. 
See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "prompt": { + "description": "The prompt to check for suspicion.", + "types": [ + "str" + ] + }, + "filter_type": { + "description": "The type of filter to use. If not specified, all filters are used.", + "types": [ + "int" + ] + } + }, + "FilterRegexRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "FiltersListRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "filter_type": { + "description": "The type of filter to return. If not specified, all filters are returned.", + "types": [ + "int", + "None" + ] + }, + "contains": { + "description": "A string to search for in the filter description.", + "types": [ + "str", + "None" + ] + } + }, + "FindUserRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "HordePerformanceRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "HordeStatusModelsAllRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "type_": { + "description": "The type of model to filter by.", + "types": [ + "horde_sdk.ai_horde_api.consts.MODEL_TYPE" + ] + }, + "min_count": { + "description": "Filter only models that have at least this amount of threads serving.", + "types": [ + "int", + "None" + ] + }, + "max_count": { + "description": "Filter only models that have at most this amount of threads serving.", + "types": [ + "int", + "None" + ] + }, + "model_state": { + "description": "If 'known', only show stats for known models in the model reference. If 'custom' only show stats for custom\nmodels. If 'all' shows stats for all models.", + "types": [ + "horde_sdk.ai_horde_api.consts.MODEL_STATE" + ] + } + }, + "HordeStatusModelsSingleRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "model_name": { + "description": "The name of the model to request.", + "types": [ + "str" + ] + } + }, + "ImageGenerateAsyncRequest": { + "trusted_workers": { + "description": "When true, only trusted workers will serve this request. When False, Evaluating workers will also be used\nwhich can increase speed but adds more risk!", + "types": [ + "bool" + ] + }, + "slow_workers": { + "description": "When True, allows slower workers to pick up this request. Disabling this incurs an extra kudos cost.", + "types": [ + "bool" + ] + }, + "extra_slow_workers": { + "description": "When True, allows extra slow workers to pick up this request.", + "types": [ + "bool" + ] + }, + "workers": { + "description": "A list of worker IDs to use for this request. If empty, any worker can pick up the request. Using this incurs\nand extra kudos cost.", + "types": [ + "list[str]" + ] + }, + "worker_blacklist": { + "description": "If true, the worker list will be treated as a blacklist instead of a whitelist.", + "types": [ + "bool" + ] + }, + "models": { + "description": "The generative models to use for this request.", + "types": [ + "list[str]" + ] + }, + "validated_backends": { + "description": "When true, only inference backends that are validated by the AI Horde devs will serve this request. When False,\nnon-validated backends will also be used which can increase speed but you may end up with unexpected results.", + "types": [ + "bool", + "None" + ] + }, + "style": { + "description": "The style ID to use for the generation.", + "types": [ + "str", + "None" + ] + }, + "dry_run": { + "description": "If true, the request will not be processed, but will return a response with the estimated kudos cost.", + "types": [ + "bool" + ] + }, + "webhook": { + "description": "Provide a URL where the AI Horde will send a POST call after each delivered generation.\nThe request will include the details of the job as well as the request ID.", + "types": [ + "str", + "None" + ] + }, + "allow_downgrade": { + "description": "If true, the request can be modified to lesser parameters if the original request is too expensive.", + "types": [ + "bool" + ] + }, + "proxied_account": { + "description": "The account this request is being proxied for. 
This requires the sending API to be a service account.", + "types": [ + "str", + "None" + ] + }, + "disable_batching": { + "description": "When true, This request will not use batching. This will allow you to retrieve accurate seeds.\nFeature is restricted to Trusted users and Patreons.", + "types": [ + "bool", + "None" + ] + }, + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "prompt": { + "description": "The prompt which will be sent to Stable Diffusion to generate an image.", + "types": [ + "str" + ] + }, + "params": { + "description": "The parameters for the image generation.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.generate.async_.ImageGenerationInputPayload", + "None" + ] + }, + "nsfw": { + "description": "Set to true if this request is NSFW. This will skip workers which censor images.", + "types": [ + "bool", + "None" + ] + }, + "censor_nsfw": { + "description": "If the request is SFW, and the worker accidentally generates NSFW, it will send back a censored image.", + "types": [ + "bool" + ] + }, + "r2": { + "description": "If True, the image will be sent via cloudflare r2 download link.", + "types": [ + "bool" + ] + }, + "shared": { + "description": "If True, The image will be shared with LAION for improving their dataset. This will also reduce your\nkudos consumption by 2. For anonymous users, this is always True.", + "types": [ + "bool" + ] + }, + "replacement_filter": { + "description": "If enabled, suspicious prompts are sanitized through a string replacement filter instead.", + "types": [ + "bool" + ] + }, + "source_image": { + "description": "The public URL of the source image or a base64 string to use.", + "types": [ + "str", + "None" + ] + }, + "source_processing": { + "description": "If source_image is provided, specifies how to process it.", + "types": [ + "horde_sdk.generation_parameters.image.consts.KNOWN_IMAGE_SOURCE_PROCESSING" + ] + }, + "source_mask": { + "description": "If source_processing is set to 'inpainting' or 'outpainting', this parameter can be optionally provided as the\nBase64-encoded webp mask of the areas to inpaint. If this arg is not passed, the inpainting/outpainting mask has to\nbe embedded as alpha channel.", + "types": [ + "str", + "None" + ] + }, + "extra_source_images": { + "description": "Additional uploaded images which can be used for further operations.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.base.ExtraSourceImageEntry]", + "None" + ] + } + }, + "ImageGenerateCheckRequest": { + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.GenerationID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "ImageGenerateJobPopRequest": { + "amount": { + "description": "The number of jobs to pop at the same time.", + "types": [ + "int", + "None" + ] + }, + "bridge_agent": { + "description": "The worker name, version and website.", + "types": [ + "str", + "None" + ] + }, + "models": { + "description": "The models this worker can generate.", + "types": [ + "list[str]" + ] + }, + "name": { + "description": "The Name of the Worker.", + "types": [ + "str" + ] + }, + "nsfw": { + "description": "Whether this worker can generate NSFW requests or not.", + "types": [ + "bool", + "None" + ] + }, + "priority_usernames": { + "description": "The usernames that should be prioritized by this worker.", + "types": [ + "list[str]", + "None" + ] + }, + "require_upfront_kudos": { + "description": "If True, this worker will only pick up requests where the owner has the required kudos to consume already available.", + "types": [ + "bool", + "None" + ] + }, + "threads": { + "description": "How many threads this worker is running. This is used to accurately the current power available in the horde.", + "types": [ + "int", + "None" + ] + }, + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "bridge_version": { + "description": "The version of the bridge this worker is running.", + "types": [ + "int", + "None" + ] + }, + "max_pixels": { + "description": "The maximum number of pixels this worker can generate.", + "types": [ + "int" + ] + }, + "blacklist": { + "description": "The list of words this worker will not accept in a prompt.", + "types": [ + "list[str]" + ] + }, + "allow_img2img": { + "description": "Whether this worker can generate img2img.", + "types": [ + "bool" + ] + }, + "allow_painting": { + "description": "Whether this worker can generate inpainting/outpainting.", + "types": [ + "bool" + ] + }, + "allow_unsafe_ipaddr": { + "description": "Whether this worker will generate from unsafe/VPN IP addresses.", + "types": [ + "bool" + ] + }, + "allow_post_processing": { + "description": "Whether this worker can do post-processing.", + "types": [ + "bool" + ] + }, + "allow_controlnet": { + "description": "Whether this worker can generate using controlnets.", + "types": [ + "bool" + ] + }, + "allow_sdxl_controlnet": { + "description": "Whether this worker can generate using SDXL controlnets.", + "types": [ + "bool" + ] + }, + "allow_lora": { + "description": "Whether this worker can generate using Loras.", + "types": [ + "bool" + ] + }, + "extra_slow_worker": { + "description": "Marks the worker as extra slow.", + "types": [ + "bool" + ] + }, + "limit_max_steps": { + "description": "Prevents the worker picking up jobs with more steps than the model average.", + "types": [ + "bool" + ] + } + }, + "ImageGenerateStatusRequest": { + "id_": { + "description": "The UUID for this job. 
Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.GenerationID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "ImageGenerationJobSubmitRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.GenerationID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "generation": { + "description": "R2 result was uploaded to R2, else the string of the result as base64.", + "types": [ + "Union" + ] + }, + "state": { + "description": "The state of this generation.", + "types": [ + "horde_sdk.ai_horde_api.consts.GENERATION_STATE" + ] + }, + "seed": { + "description": "The seed for this generation.", + "types": [ + "int" + ] + }, + "censored": { + "description": "If True, this resulting image has been censored.", + "types": [ + "bool" + ] + }, + "gen_metadata": { + "description": "Extra metadata about faulted or defaulted components of the generation", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.base.GenMetadataEntry]", + "None" + ] + } + }, + "ImageStatsModelsRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "api_model_state": { + "description": "The state of the models to get stats for. Known models are models that are known to the system.", + "types": [ + "horde_sdk.ai_horde_api.consts.MODEL_STATE" + ] + } + }, + "ImageStatsModelsTotalRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "KudosAwardRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "username": { + "description": "The username of the user to award Kudos to.", + "types": [ + "str" + ] + }, + "amount": { + "description": "The amount of Kudos to award.", + "types": [ + "float" + ] + } + }, + "KudosTransferRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. 
See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "username": { + "description": "The username of the user to transfer Kudos to, including the '#'. For example, 'db0#1'.", + "types": [ + "str" + ] + }, + "amount": { + "description": "The amount of Kudos to transfer.", + "types": [ + "float" + ] + } + }, + "ListUsersDetailsRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "page": { + "description": "The page number to request. There are up to 25 users per page.", + "types": [ + "int" + ] + }, + "sort": { + "description": "The field to sort the users by. The default is by kudos.", + "types": [ + "str" + ] + } + }, + "ModifyStyleImageRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "name": { + "description": "The name of the style.", + "types": [ + "str" + ] + }, + "info": { + "description": "Extra information or comments about this style provided by its creator.", + "types": [ + "str", + "None" + ] + }, + "prompt": { + "description": "The prompt template which will be sent to generate an image.\n\nThe user's prompt will be injected into this. This argument MUST include a '{p}' which specifies the part where the user's prompt will be injected and an '{np}' where the user's negative prompt will be injected (if any)", + "types": [ + "str" + ] + }, + "public": { + "description": "When true this style will be listed among all styles publicly.\n\nWhen false, information about this style can only be seen by people who know its ID or name.", + "types": [ + "bool" + ] + }, + "nsfw": { + "description": "When true, it signified this style is expected to generate NSFW images primarily.", + "types": [ + "bool" + ] + }, + "tags": { + "description": "Tags associated with this style.", + "types": [ + "list[str]", + "None" + ] + }, + "models": { + "description": "The models which this style will attempt to use.", + "types": [ + "list[str]", + "None" + ] + }, + "style_id": { + "description": "The ID of the style to modify.", + "types": [ + "str" + ] + }, + "params": { + "description": "The parameters to use for all generations using this style, if not set by the user.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.styles.ModelStyleInputParamsStable" + ] + }, + "sharedkey": { + "description": "The UUID of a shared key which will be used to fulfil this style when active.", + "types": [ + "str", + "None" + ] + } + }, + "ModifyStyleTextRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. 
See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "name": { + "description": "The name of the style.", + "types": [ + "str" + ] + }, + "info": { + "description": "Extra information or comments about this style provided by its creator.", + "types": [ + "str", + "None" + ] + }, + "prompt": { + "description": "The prompt template which will be sent to generate an image.\n\nThe user's prompt will be injected into this. This argument MUST include a '{p}' which specifies the part where the user's prompt will be injected and an '{np}' where the user's negative prompt will be injected (if any)", + "types": [ + "str" + ] + }, + "public": { + "description": "When true this style will be listed among all styles publicly.\n\nWhen false, information about this style can only be seen by people who know its ID or name.", + "types": [ + "bool" + ] + }, + "nsfw": { + "description": "When true, it signified this style is expected to generate NSFW images primarily.", + "types": [ + "bool" + ] + }, + "tags": { + "description": "Tags associated with this style.", + "types": [ + "list[str]", + "None" + ] + }, + "models": { + "description": "The models which this style will attempt to use.", + "types": [ + "list[str]", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "style_id": { + "description": "The ID of the style to modify.", + "types": [ + "str" + ] + }, + "params": { + "description": "The parameters to use for all generations using this style, if not set by the user.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.styles.ModelStyleInputParamsKobold" + ] + } + }, + "ModifyTeamRequest": { + "name": { + "description": "The name of the team.", + "types": [ + "str" + ] + }, + "info": { + "description": "Extra information or comments about this team provided by its owner.", + "types": [ + "str", + "None" + ] + }, + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "team_id": { + "description": "The ID of the team to modify.", + "types": [ + "str" + ] + } + }, + "ModifyUserRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "admin_comment": { + "description": "Add further information about this user for the other admins.", + "types": [ + "str", + "None" + ] + }, + "concurrency": { + "description": "The amount of concurrent request this user can have.", + "types": [ + "int", + "None" + ] + }, + "contact": { + "description": "Contact details for the horde admins to reach the user in case of emergency. 
This is only visible to horde\nmoderators.", + "types": [ + "str", + "None" + ] + }, + "customizer": { + "description": "When set to true, the user will be able to serve custom Stable Diffusion models which do not exist in the\nOfficial AI Horde Model Reference.", + "types": [ + "bool", + "None" + ] + }, + "education": { + "description": "When set to true, the user is considered an education account and some options become more restrictive.", + "types": [ + "bool", + "None" + ] + }, + "filtered": { + "description": "When set to true, the replacement filter will always be applied against this user", + "types": [ + "bool", + "None" + ] + }, + "flagged": { + "description": "When set to true, the user cannot transfer kudos and all their workers are put into permanent maintenance.", + "types": [ + "bool", + "None" + ] + }, + "moderator": { + "description": "Set to true to make this user a horde moderator.", + "types": [ + "bool", + "None" + ] + }, + "monthly_kudos": { + "description": "When specified, will start assigning the user monthly kudos, starting now!", + "types": [ + "int", + "None" + ] + }, + "public_workers": { + "description": "Set to true to make this user display their worker IDs.", + "types": [ + "bool", + "None" + ] + }, + "service": { + "description": "When set to true, the user is considered a service account proxying the requests for other users.", + "types": [ + "bool", + "None" + ] + }, + "special": { + "description": "When set to true, The user can send special payloads.", + "types": [ + "bool", + "None" + ] + }, + "trusted": { + "description": "When set to true,the user and their servers will not be affected by suspicion.", + "types": [ + "bool", + "None" + ] + }, + "usage_multiplier": { + "description": "The amount by which to multiply the users kudos consumption.", + "types": [ + "float", + "None" + ] + }, + "username": { + "description": "When specified, will change the username. No profanity allowed!", + "types": [ + "str", + "None" + ] + }, + "vpn": { + "description": "When set to true, the user will be able to onboard workers behind a VPN. This should be used as a temporary\nsolution until the user is trusted.", + "types": [ + "bool", + "None" + ] + }, + "worker_invited": { + "description": "Set to the amount of workers this user is allowed to join to the horde when in worker invite-only mode.", + "types": [ + "int", + "None" + ] + }, + "kudos": { + "description": "The amount of kudos to modify (can be negative).", + "types": [ + "float", + "None" + ] + }, + "reset_suspicion": { + "description": "Set the user's suspicion back to 0.", + "types": [ + "bool", + "None" + ] + }, + "undelete": { + "description": "When set to true, A user's who's marked to be deleted will become active again.", + "types": [ + "bool", + "None" + ] + }, + "user_id": { + "description": "The user's ID, as a `str`, but only containing numeric values.", + "types": [ + "str" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "ModifyWorkerRequest": { + "worker_id": { + "description": "The UUID of the worker in question for this request.", + "types": [ + "str", + "horde_sdk.ai_horde_api.fields.WorkerID" + ] + }, + "apikey": { + "description": "Defaults to `ANON_API_KEY`. 
See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "info": { + "description": "You can optionally provide a server note which will be seen in the server details. No profanity allowed!", + "types": [ + "str", + "None" + ] + }, + "maintenance": { + "description": "Set to true to put this worker into maintenance.", + "types": [ + "bool", + "None" + ] + }, + "maintenance_msg": { + "description": "If maintenance is True, you can optionally provide a message to be used instead of the default maintenance\nmessage, so that the owner is informed.", + "types": [ + "str", + "None" + ] + }, + "name": { + "description": "When this is set, it will change the worker's name. No profanity allowed!", + "types": [ + "str", + "None" + ] + }, + "paused": { + "description": "(Mods only) Set to true to pause this worker.", + "types": [ + "bool", + "None" + ] + }, + "team": { + "description": "The team towards which this worker contributes kudos. It an empty string ('') is passed, it will leave the", + "types": [ + "str", + "None" + ] + } + }, + "NewsRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "PatchExistingFilter": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "description": { + "description": "The description of the filter.", + "types": [ + "str", + "None" + ] + }, + "regex": { + "description": "The regex for this filter.", + "types": [ + "str", + "None" + ] + }, + "replacement": { + "description": "The replacement string for this regex.", + "types": [ + "str", + "None" + ] + }, + "filter_type": { + "description": "The type of filter to add.", + "types": [ + "int", + "None" + ] + }, + "filter_id": { + "description": "The ID of the filter to update.", + "types": [ + "str" + ] + } + }, + "PutNewFilterRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "description": { + "description": "The description of the filter.", + "types": [ + "str" + ] + }, + "filter_type": { + "description": "The type of filter to add.", + "types": [ + "int" + ] + }, + "regex": { + "description": "The regex for this filter.", + "types": [ + "str" + ] + }, + "replacement": { + "description": "The replacement string for this regex.", + "types": [ + "str", + "None" + ] + } + }, + "RateRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "best": { + "description": "The UUID of the best image in this generation batch (only used when 2+ images generated).\nIf 2+ aesthetic ratings are also provided, then they take precedence if they're not tied.", + "types": [ + "str", + "None" + ] + }, + "ratings": { + "description": "The aesthetic ratings for each image in the batch.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.generate.rate.AestheticRating]" + ] + } + }, + "SharedKeyCreateRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "kudos": { + "description": "The Kudos limit assigned to this key.", + "types": [ + "int" + ] + }, + "name": { + "description": "The Shared Key Name.", + "types": [ + "str" + ] + }, + "max_image_pixels": { + "description": "The maximum amount of image pixels this key can generate per job. -1 means unlimited.", + "types": [ + "int" + ] + }, + "max_image_steps": { + "description": "The maximum amount of image steps this key can use per job. -1 means unlimited.", + "types": [ + "int" + ] + }, + "max_text_tokens": { + "description": "The maximum amount of text tokens this key can generate per job. -1 means unlimited.", + "types": [ + "int" + ] + }, + "expiry": { + "description": "The number of days until this key expires. -1 means never expires.", + "types": [ + "int" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "SharedKeyDeleteRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "sharedkey_id": { + "description": "The shared key ID to use for this request.", + "types": [ + "horde_sdk.ai_horde_api.fields.SharedKeyID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "SharedKeyDetailsRequest": { + "sharedkey_id": { + "description": "The shared key ID to use for this request.", + "types": [ + "horde_sdk.ai_horde_api.fields.SharedKeyID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "SharedKeyModifyRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "sharedkey_id": { + "description": "The shared key ID to use for this request.", + "types": [ + "horde_sdk.ai_horde_api.fields.SharedKeyID" + ] + }, + "kudos": { + "description": "The Kudos limit assigned to this key.", + "types": [ + "int" + ] + }, + "name": { + "description": "The Shared Key Name.", + "types": [ + "str" + ] + }, + "max_image_pixels": { + "description": "The maximum amount of image pixels this key can generate per job. -1 means unlimited.", + "types": [ + "int" + ] + }, + "max_image_steps": { + "description": "The maximum amount of image steps this key can use per job. -1 means unlimited.", + "types": [ + "int" + ] + }, + "max_text_tokens": { + "description": "The maximum amount of text tokens this key can generate per job. -1 means unlimited.", + "types": [ + "int" + ] + }, + "expiry": { + "description": "The number of days until this key expires. -1 means never expires.", + "types": [ + "int" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "SingleFilterRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "filter_id": { + "description": "The ID of the filter to retrieve.", + "types": [ + "str" + ] + } + }, + "SingleIPTimeoutsRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "SingleStyleImageByIDRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "style_id": { + "description": "The ID of the style to retrieve.", + "types": [ + "str" + ] + } + }, + "SingleStyleImageByNameRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "style_name": { + "description": "The name of the style to retrieve.", + "types": [ + "str" + ] + } + }, + "SingleStyleTextByIDRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "style_id": { + "description": "The ID of the style to retrieve.", + "types": [ + "str" + ] + } + }, + "SingleStyleTextByNameRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "style_name": { + "description": "The name of the style to retrieve.", + "types": [ + "str" + ] + } + }, + "SingleTeamDetailsRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "team_id": { + "description": "The ID of the team to get details for.", + "types": [ + "str" + ] + } + }, + "SingleUserDetailsRequest": { + "user_id": { + "description": "The user's ID, as a `str`, but only containing numeric values.", + "types": [ + "str" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "SingleWorkerDetailsRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "worker_id": { + "description": "The UUID of the worker in question for this request.", + "types": [ + "str", + "horde_sdk.ai_horde_api.fields.WorkerID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "SingleWorkerMessageRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "message_id": { + "description": "The ID of the message to retrieve.", + "types": [ + "str" + ] + } + }, + "SingleWorkerNameDetailsRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "worker_name": { + "description": "The name of the worker in question for this request.", + "types": [ + "str" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "StyleImageExampleAddRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "style_id": { + "description": "The ID of the style to add the example to.", + "types": [ + "str" + ] + }, + "url": { + "description": "The URL of the image to add as an example.", + "types": [ + "str" + ] + }, + "primary": { + "description": "When true this image is to be used as the primary example for this style.", + "types": [ + "bool" + ] + } + }, + "StyleImageExampleDeleteRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "style_id": { + "description": "The ID of the style to delete the example from.", + "types": [ + "str" + ] + }, + "example_id": { + "description": "The ID of the example to delete.", + "types": [ + "str" + ] + } + }, + "StyleImageExampleModifyRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "style_id": { + "description": "The ID of the style to modify the example of.", + "types": [ + "str" + ] + }, + "example_id": { + "description": "The ID of the example to modify.", + "types": [ + "str" + ] + }, + "url": { + "description": "The URL of the image to add as an example.", + "types": [ + "str" + ] + }, + "primary": { + "description": "When true this image is to be used as the primary example for this style.", + "types": [ + "bool" + ] + } + }, + "TextGenerateAsyncRequest": { + "trusted_workers": { + "description": "When true, only trusted workers will serve this request. 
When False, Evaluating workers will also be used\nwhich can increase speed but adds more risk!", + "types": [ + "bool" + ] + }, + "slow_workers": { + "description": "When True, allows slower workers to pick up this request. Disabling this incurs an extra kudos cost.", + "types": [ + "bool" + ] + }, + "extra_slow_workers": { + "description": "When True, allows extra slow workers to pick up this request.", + "types": [ + "bool" + ] + }, + "workers": { + "description": "A list of worker IDs to use for this request. If empty, any worker can pick up the request. Using this incurs\nand extra kudos cost.", + "types": [ + "list[str]" + ] + }, + "worker_blacklist": { + "description": "If true, the worker list will be treated as a blacklist instead of a whitelist.", + "types": [ + "bool" + ] + }, + "models": { + "description": "The generative models to use for this request.", + "types": [ + "list[str]" + ] + }, + "validated_backends": { + "description": "When true, only inference backends that are validated by the AI Horde devs will serve this request. When False,\nnon-validated backends will also be used which can increase speed but you may end up with unexpected results.", + "types": [ + "bool", + "None" + ] + }, + "style": { + "description": "The style ID to use for the generation.", + "types": [ + "str", + "None" + ] + }, + "dry_run": { + "description": "If true, the request will not be processed, but will return a response with the estimated kudos cost.", + "types": [ + "bool" + ] + }, + "webhook": { + "description": "Provide a URL where the AI Horde will send a POST call after each delivered generation.\nThe request will include the details of the job as well as the request ID.", + "types": [ + "str", + "None" + ] + }, + "allow_downgrade": { + "description": "If true, the request can be modified to lesser parameters if the original request is too expensive.", + "types": [ + "bool" + ] + }, + "proxied_account": { + "description": "If using a service account as a proxy, provide this value to identify the actual account from which this\nrequest is coming from.", + "types": [ + "str", + "None" + ] + }, + "disable_batching": { + "description": "When true, This request will not use batching. This will allow you to retrieve accurate seeds.\nFeature is restricted to Trusted users and Patreons.", + "types": [ + "bool", + "None" + ] + }, + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "params": { + "description": "The parameters to use for the generation.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.generate.text.async_.ModelGenerationInputKobold", + "None" + ] + }, + "prompt": { + "description": "The prompt which will be sent to KoboldAI to generate text.", + "types": [ + "str", + "None" + ] + }, + "extra_source_images": { + "description": "Any extra source images that should be used for this request; e.g., for multi-modal models.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.base.ExtraSourceImageEntry]", + "None" + ] + }, + "softprompt": { + "description": "Specify which softprompt needs to be used to service this request.", + "types": [ + "str", + "None" + ] + } + }, + "TextGenerateJobPopRequest": { + "amount": { + "description": "The number of jobs to pop at the same time.", + "types": [ + "int", + "None" + ] + }, + "bridge_agent": { + "description": "The worker name, version and website.", + "types": [ + "str", + "None" + ] + }, + "models": { + "description": "The models this worker can generate.", + "types": [ + "list[str]" + ] + }, + "name": { + "description": "The Name of the Worker.", + "types": [ + "str" + ] + }, + "nsfw": { + "description": "Whether this worker can generate NSFW requests or not.", + "types": [ + "bool", + "None" + ] + }, + "priority_usernames": { + "description": "The usernames that should be prioritized by this worker.", + "types": [ + "list[str]", + "None" + ] + }, + "require_upfront_kudos": { + "description": "If True, this worker will only pick up requests where the owner has the required kudos to consume already available.", + "types": [ + "bool", + "None" + ] + }, + "threads": { + "description": "How many threads this worker is running. This is used to accurately the current power available in the horde.", + "types": [ + "int", + "None" + ] + }, + "max_length": { + "description": "The maximum amount of tokens this worker can generate.", + "types": [ + "int" + ] + }, + "max_context_length": { + "description": "The max amount of context to submit to this AI for sampling.", + "types": [ + "int" + ] + }, + "softprompts": { + "description": "The available softprompt files on this worker for the currently running model.", + "types": [ + "list[str]", + "None" + ] + }, + "extra_slow_worker": { + "description": "If this worker is extra slow and requires a longer timeout.\n\nNote that this reduces the pool of jobs available to this worker.", + "types": [ + "bool" + ] + }, + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "TextGenerateStatusRequest": { + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.GenerationID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "TextGenerationJobSubmitRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "id_": { + "description": "The UUID for this job. Use this to post the results in the future.", + "types": [ + "horde_sdk.ai_horde_api.fields.GenerationID" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "generation": { + "description": "R2 result was uploaded to R2, else the string of the result.", + "types": [ + "str" + ] + }, + "state": { + "description": "The state of this generation.", + "types": [ + "horde_sdk.ai_horde_api.consts.GENERATION_STATE" + ] + }, + "gen_metadata": { + "description": "Extra metadata about faulted or defaulted components of the generation", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.base.GenMetadataEntry]", + "None" + ] + } + }, + "TextStatsModelsRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "TextStatsModelsTotalRequest": { + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + } + }, + "UpdateCollectionRequest": { + "apikey": { + "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", + "types": [ + "str", + "None" + ] + }, + "accept": { + "description": "The 'accept' header field.", + "types": [ + "horde_sdk.generic_api.metadata.GenericAcceptTypes" + ] + }, + "client_agent": { + "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", + "types": [ + "str" + ] + }, + "name": { + "description": "The name for the collection. 
Case-sensitive and unique per user.", + "types": [ + "str" + ] + }, + "info": { + "description": "Extra information about this collection.", + "types": [ + "str", + "None" + ] + }, + "public": { + "description": "When true this collection will be listed among all collections publicly.When false, information about this\ncollection can only be seen by people who know its ID or name.", + "types": [ + "bool" + ] + }, + "styles": { + "description": "The styles to use in this collection.", + "types": [ + "list[str]" + ] + }, + "collection_id": { + "description": "The ID of the collection to update.", + "types": [ + "str" + ] + } + } +} diff --git a/docs/response_field_names_and_descriptions.json b/docs/ai-horde/response_field_names_and_descriptions.json similarity index 60% rename from docs/response_field_names_and_descriptions.json rename to docs/ai-horde/response_field_names_and_descriptions.json index fd7258ec..9c8520ce 100644 --- a/docs/response_field_names_and_descriptions.json +++ b/docs/ai-horde/response_field_names_and_descriptions.json @@ -39,7 +39,7 @@ "id_": { "description": "The UUID for this job.", "types": [ - "horde_sdk.ai_horde_api.fields.JobID" + "horde_sdk.ai_horde_api.fields.GenerationID" ] } }, @@ -53,7 +53,7 @@ "forms": { "description": "The status of each form in the job.", "types": [ - "list[horde_sdk.ai_horde_api.apimodels.alchemy._status.AlchemyFormStatus]" + "list[horde_sdk.ai_horde_api.apimodels.alchemy.status.AlchemyFormStatus]" ] } }, @@ -65,27 +65,274 @@ ] } }, - "AlchemyPopResponse": { + "AlchemyJobPopResponse": { "forms": { "description": "The forms that to be generated", "types": [ - "list[horde_sdk.ai_horde_api.apimodels.alchemy._pop.AlchemyPopFormPayload]", + "list[horde_sdk.ai_horde_api.apimodels.alchemy.pop.AlchemyPopFormPayload]", "None" ] }, "skipped": { "description": "The requests that were skipped because this worker were not eligible for them.", "types": [ - "horde_sdk.ai_horde_api.apimodels.alchemy._pop.NoValidAlchemyFound", + "horde_sdk.ai_horde_api.apimodels.alchemy.pop.NoValidAlchemyFound", "None" ] } }, + "AllCollectionsResponse": { + "root": { + "description": "The underlying list of collections.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.collections.ResponseModelCollection]" + ] + } + }, + "IPTimeoutListResponse": { + "root": { + "description": "The underlying list of IP addresses that are blocked.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.operations.IPTimeout]" + ] + } + }, + "AllStylesImageResponse": { + "root": { + "description": "The underlying list of styles.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.styles.StyleStable]" + ] + } + }, + "AllStylesTextResponse": { + "root": { + "description": "The underlying list of styles.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.styles.StyleKobold]" + ] + } + }, + "AllTeamDetailsResponse": { + "root": { + "description": "The underlying list of teams.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.teams.TeamDetails]" + ] + } + }, + "ResponseModelMessages": { + "root": { + "description": "The underlying list of messages.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.workers.messages.ResponseModelMessage]" + ] + } + }, "AllWorkersDetailsResponse": { "root": { "description": "The underlying list of worker details.", "types": [ - "list[horde_sdk.ai_horde_api.apimodels.workers._workers.WorkerDetailItem]" + "list[horde_sdk.ai_horde_api.apimodels.workers.workers.WorkerDetailItem]" + ] + } + }, + "BlockIPAddressResponse": { + "message": { + 
"description": "A message from the API. This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + } + }, + "BlockWorkerIPAddressResponse": { + "message": { + "description": "A message from the API. This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + } + }, + "ResponseModelCollection": { + "id": { + "description": "The UUID of the collection. Use this to use this collection of retrieve its information in the future.", + "types": [ + "str" + ] + }, + "name": { + "description": "The name for the collection. Case-sensitive and unique per user.", + "types": [ + "str" + ] + }, + "type": { + "description": "The kind of styles stored in this collection.", + "types": [ + "Literal" + ] + }, + "info": { + "description": "Extra information about this collection.", + "types": [ + "str", + "None" + ] + }, + "public": { + "description": "When true this collection will be listed among all collection publicly.When false, information about this\ncollection can only be seen by people who know its ID or name.", + "types": [ + "bool" + ] + }, + "styles": { + "description": "The styles contained in this collection.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.collections.ResponseModelStylesShort]" + ] + }, + "use_count": { + "description": "The number of times this collection has been used.", + "types": [ + "int", + "None" + ] + } + }, + "CreateCollectionResponse": { + "warnings": { + "description": "A list of warnings from the API. This is typically an error or warning message,\nbut may also be informational.", + "types": [ + "list[horde_sdk.generic_api.apimodels.RequestSingleWarning]", + "None" + ] + }, + "message": { + "description": "A message from the API. This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + }, + "id_": { + "description": "The ID of the collection.", + "types": [ + "str" + ] + } + }, + "ModifyStyleImageResponse": { + "warnings": { + "description": "A list of warnings from the API. This is typically an error or warning message,\nbut may also be informational.", + "types": [ + "list[horde_sdk.generic_api.apimodels.RequestSingleWarning]", + "None" + ] + }, + "message": { + "description": "A message from the API. This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + }, + "id_": { + "description": "The ID of the style.", + "types": [ + "str" + ] + } + }, + "ModifyStyleTextResponse": { + "warnings": { + "description": "A list of warnings from the API. This is typically an error or warning message,\nbut may also be informational.", + "types": [ + "list[horde_sdk.generic_api.apimodels.RequestSingleWarning]", + "None" + ] + }, + "message": { + "description": "A message from the API. 
This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + }, + "id_": { + "description": "The ID of the style.", + "types": [ + "str" + ] + } + }, + "ModifyTeam": { + "id_": { + "description": "The ID of the team.", + "types": [ + "str" + ] + }, + "name": { + "description": "The name of the team.", + "types": [ + "str" + ] + }, + "info": { + "description": "Extra information or comments about this team provided by its owner.", + "types": [ + "str", + "None" + ] + } + }, + "ResponseModelMessage": { + "worker_id": { + "description": "The ID of the worker that the message is for.", + "types": [ + "str", + "None" + ] + }, + "message": { + "description": "The message.", + "types": [ + "str" + ] + }, + "origin": { + "description": "The origin of the message.", + "types": [ + "str", + "None" + ] + }, + "expiry": { + "description": "The number of hours after which this message expires.", + "types": [ + "int", + "None" + ] + } + }, + "DeleteCollectionResponse": { + "message": { + "description": "A message from the API. This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + } + }, + "DeleteFilterResponse": { + "message": { + "description": "A message from the API. This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + } + }, + "DeleteIPAddressResponse": { + "message": { + "description": "A message from the API. This is typically an error or warning message, but may also be informational.", + "types": [ + "str" ] } }, @@ -153,7 +400,7 @@ "generations": { "description": "The individual image generation responses in this request.", "types": [ - "list[horde_sdk.ai_horde_api.apimodels.generate._status.ImageGeneration]" + "list[horde_sdk.ai_horde_api.apimodels.generate.status.ImageGeneration]" ] }, "shared": { @@ -164,6 +411,36 @@ ] } }, + "DeleteStyleImageResponse": { + "message": { + "description": "A message from the API. This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + } + }, + "DeleteStyleTextResponse": { + "message": { + "description": "A message from the API. This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + } + }, + "DeleteTeamResponse": { + "deleted_id": { + "description": "The ID of the team that was deleted.", + "types": [ + "str" + ] + }, + "deleted_name": { + "description": "The name of the team that was deleted.", + "types": [ + "str" + ] + } + }, "TextGenerateStatusResponse": { "finished": { "description": "The amount of finished jobs in this request.", @@ -228,7 +505,31 @@ "generations": { "description": "The generations that have been completed in this request.", "types": [ - "list[horde_sdk.ai_horde_api.apimodels.generate.text._status.GenerationKobold]" + "list[horde_sdk.ai_horde_api.apimodels.generate.text.status.GenerationKobold]" + ] + } + }, + "DeleteUserResponse": { + "message": { + "description": "A message from the API. This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + } + }, + "DeleteWorkerIPAddressResponse": { + "message": { + "description": "A message from the API. This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + } + }, + "DeleteWorkerMessageResponse": { + "message": { + "description": "A message from the API. 
This is typically an error or warning message, but may also be informational.", + "types": [ + "str" ] } }, @@ -248,11 +549,42 @@ ] } }, + "FilterPromptSuspicionResponse": { + "matches": { + "description": "The sections of the prompt that matched the filter.", + "types": [ + "list[str]", + "None" + ] + }, + "suspicion": { + "description": "Rates how suspicious the provided prompt is. A suspicion over 2 means it would be blocked.", + "types": [ + "int" + ] + } + }, + "FilterRegexResponse": { + "root": { + "description": "The underlying list of filters.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.filters.FilterRegex]" + ] + } + }, + "FiltersListResponse": { + "root": { + "description": "The underlying list of filters.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.filters.FilterDetails]" + ] + } + }, "UserDetailsResponse": { "active_generations": { "description": "The active generations this user has requested.", "types": [ - "horde_sdk.ai_horde_api.apimodels._users.ActiveGenerations", + "horde_sdk.ai_horde_api.apimodels.users.ActiveGenerations", "None" ] }, @@ -287,7 +619,7 @@ "contributions": { "description": "How many images and megapixelsteps this user has generated.", "types": [ - "horde_sdk.ai_horde_api.apimodels._users.ContributionsDetails", + "horde_sdk.ai_horde_api.apimodels.users.ContributionsDetails", "None" ] }, @@ -329,7 +661,7 @@ "kudos_details": { "description": "How much Kudos this user has accumulated or used for generating images.", "types": [ - "horde_sdk.ai_horde_api.apimodels._users.UserKudosDetails", + "horde_sdk.ai_horde_api.apimodels.users.UserKudosDetails", "None" ] }, @@ -343,7 +675,7 @@ "monthly_kudos": { "description": "How much recurring Kudos this user receives monthly.", "types": [ - "horde_sdk.ai_horde_api.apimodels._users.MonthlyKudos", + "horde_sdk.ai_horde_api.apimodels.users.MonthlyKudos", "None" ] }, @@ -357,7 +689,7 @@ "records": { "description": "How many images, texts, megapixelsteps and tokens this user has generated or requested.", "types": [ - "horde_sdk.ai_horde_api.apimodels._users.UserRecords", + "horde_sdk.ai_horde_api.apimodels.users.UserRecords", "None" ] }, @@ -382,10 +714,17 @@ "None" ] }, - "suspicious": { - "description": "(Privileged) How much suspicion this user has accumulated.", + "deleted": { + "description": "If True, this user has been deleted.", "types": [ - "int", + "bool", + "None" + ] + }, + "suspicious": { + "description": "(Privileged) How much suspicion this user has accumulated.", + "types": [ + "int", "None" ] }, @@ -399,7 +738,7 @@ "usage": { "description": "How many images and megapixelsteps this user has requested.", "types": [ - "horde_sdk.ai_horde_api.apimodels._users.UsageDetails", + "horde_sdk.ai_horde_api.apimodels.users.UsageDetails", "None" ] }, @@ -448,7 +787,7 @@ "styles": { "description": "The styles this user has created.", "types": [ - "list[horde_sdk.ai_horde_api.apimodels._styles.ResponseModelStylesUser]", + "list[horde_sdk.ai_horde_api.apimodels.styles.ResponseModelStylesUser]", "None" ] } @@ -550,7 +889,7 @@ "root": { "description": "The underlying list of models.", "types": [ - "list[horde_sdk.ai_horde_api.apimodels._status.ActiveModel]" + "list[horde_sdk.ai_horde_api.apimodels.base.ActiveModel]" ] } }, @@ -558,7 +897,7 @@ "root": { "description": "The underlying list of models.", "types": [ - "list[horde_sdk.ai_horde_api.apimodels._status.ActiveModel]" + "list[horde_sdk.ai_horde_api.apimodels.base.ActiveModel]" ] } }, @@ -580,7 +919,7 @@ "id_": { "description": "The UUID for this 
job.", "types": [ - "horde_sdk.ai_horde_api.fields.JobID" + "horde_sdk.ai_horde_api.fields.GenerationID" ] }, "kudos": { @@ -670,26 +1009,26 @@ "id_": { "description": "(Obsolete) The UUID for this image generation.", "types": [ - "horde_sdk.ai_horde_api.fields.JobID", + "horde_sdk.ai_horde_api.fields.GenerationID", "None" ] }, "ids": { "description": "A list of UUIDs for image generation.", "types": [ - "list[horde_sdk.ai_horde_api.fields.JobID]" + "list[horde_sdk.ai_horde_api.fields.GenerationID]" ] }, "payload": { "description": "The parameters used to generate this image.", "types": [ - "horde_sdk.ai_horde_api.apimodels.generate._pop.ImageGenerateJobPopPayload" + "horde_sdk.ai_horde_api.apimodels.generate.pop.ImageGenerateJobPopPayload" ] }, "skipped": { "description": "The reasons this worker was not issued certain jobs, and the number of jobs for each reason.", "types": [ - "horde_sdk.ai_horde_api.apimodels.generate._pop.ImageGenerateJobPopSkippedStatus" + "horde_sdk.ai_horde_api.apimodels.generate.pop.ImageGenerateJobPopSkippedStatus" ] }, "model": { @@ -710,7 +1049,7 @@ "description": "If source_image is provided, specifies how to process it.", "types": [ "str", - "horde_sdk.ai_horde_api.consts.KNOWN_SOURCE_PROCESSING" + "horde_sdk.generation_parameters.image.consts.KNOWN_IMAGE_SOURCE_PROCESSING" ] }, "source_mask": { @@ -720,287 +1059,672 @@ "None" ] }, - "r2_upload": { - "description": "(Obsolete) The r2 upload link to use to upload this image.", + "r2_upload": { + "description": "(Obsolete) The r2 upload link to use to upload this image.", + "types": [ + "str", + "None" + ] + }, + "r2_uploads": { + "description": "The r2 upload links for each this image. Each index matches the ID in self.ids", + "types": [ + "list[str]", + "None" + ] + }, + "ttl": { + "description": "The amount of seconds before this job is considered stale and aborted.", + "types": [ + "int", + "None" + ] + }, + "messages": { + "description": "The messages that have been sent to this worker.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.generate.pop.PopResponseModelMessage]", + "None" + ] + } + }, + "JobSubmitResponse": { + "reward": { + "description": "The amount of kudos gained for submitting this request.", + "types": [ + "float" + ] + } + }, + "ImageStatsModelsResponse": { + "day": { + "description": "The stats for the past day.", + "types": [ + "dict[str, int]" + ] + }, + "month": { + "description": "The stats for the past month.", + "types": [ + "dict[str, int]" + ] + }, + "total": { + "description": "The total stats for all time.", + "types": [ + "dict[str, int]" + ] + } + }, + "ImageStatsModelsTotalResponse": { + "day": { + "description": "The total stats for the past day.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.stats.SinglePeriodImgStat", + "None" + ] + }, + "hour": { + "description": "The total stats for the past hour.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.stats.SinglePeriodImgStat", + "None" + ] + }, + "minute": { + "description": "The total stats for the past minute.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.stats.SinglePeriodImgStat", + "None" + ] + }, + "month": { + "description": "The total stats for the past month.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.stats.SinglePeriodImgStat", + "None" + ] + }, + "total": { + "description": "The total stats for all time.", + "types": [ + "horde_sdk.ai_horde_api.apimodels.stats.SinglePeriodImgStat", + "None" + ] + } + }, + "KudosAwardResponse": { + "awarded": { + "description": "The amount of Kudos 
awarded.", + "types": [ + "float", + "None" + ] + } + }, + "KudosTransferResponse": { + "transferred": { + "description": "The amount of Kudos transferred.", + "types": [ + "float", + "None" + ] + } + }, + "ListUsersDetailsResponse": { + "root": { + "description": "The underlying list of user details.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.users.UserDetailsResponse]" + ] + } + }, + "ModifyUserResponse": { + "admin_comment": { + "description": "Add further information about this user for the other admins.", + "types": [ + "str", + "None" + ] + }, + "concurrency": { + "description": "The amount of concurrent request this user can have.", + "types": [ + "int", + "None" + ] + }, + "contact": { + "description": "Contact details for the horde admins to reach the user in case of emergency. This is only visible to horde\nmoderators.", + "types": [ + "str", + "None" + ] + }, + "customizer": { + "description": "When set to true, the user will be able to serve custom Stable Diffusion models which do not exist in the\nOfficial AI Horde Model Reference.", + "types": [ + "bool", + "None" + ] + }, + "education": { + "description": "When set to true, the user is considered an education account and some options become more restrictive.", + "types": [ + "bool", + "None" + ] + }, + "filtered": { + "description": "When set to true, the replacement filter will always be applied against this user", + "types": [ + "bool", + "None" + ] + }, + "flagged": { + "description": "When set to true, the user cannot transfer kudos and all their workers are put into permanent maintenance.", + "types": [ + "bool", + "None" + ] + }, + "moderator": { + "description": "Set to true to make this user a horde moderator.", + "types": [ + "bool", + "None" + ] + }, + "monthly_kudos": { + "description": "When specified, will start assigning the user monthly kudos, starting now!", + "types": [ + "int", + "None" + ] + }, + "public_workers": { + "description": "Set to true to make this user display their worker IDs.", + "types": [ + "bool", + "None" + ] + }, + "service": { + "description": "When set to true, the user is considered a service account proxying the requests for other users.", + "types": [ + "bool", + "None" + ] + }, + "special": { + "description": "When set to true, The user can send special payloads.", + "types": [ + "bool", + "None" + ] + }, + "trusted": { + "description": "When set to true,the user and their servers will not be affected by suspicion.", + "types": [ + "bool", + "None" + ] + }, + "usage_multiplier": { + "description": "The amount by which to multiply the users kudos consumption.", + "types": [ + "float", + "None" + ] + }, + "username": { + "description": "When specified, will change the username. No profanity allowed!", + "types": [ + "str", + "None" + ] + }, + "vpn": { + "description": "When set to true, the user will be able to onboard workers behind a VPN. 
This should be used as a temporary\nsolution until the user is trusted.", + "types": [ + "bool", + "None" + ] + }, + "worker_invited": { + "description": "Set to the amount of workers this user is allowed to join to the horde when in worker invite-only mode.", + "types": [ + "int", + "None" + ] + }, + "new_kudos": { + "description": "The new amount of kudos this user has.", + "types": [ + "float", + "None" + ] + }, + "new_suspicion": { + "description": "The new amount of suspicion this user has.", + "types": [ + "int", + "None" + ] + }, + "undeleted": { + "description": "True if the user was undeleted with this operation.", + "types": [ + "bool", + "None" + ] + } + }, + "ModifyWorkerResponse": { + "info": { + "description": "The new state of the 'info' var for this worker.", + "types": [ + "str", + "None" + ] + }, + "maintenance": { + "description": "The new state of the 'maintenance' var for this worker. When True, this worker will not pick up any new\nrequests.", + "types": [ + "bool", + "None" + ] + }, + "name": { + "description": "The new name for this this worker. No profanity allowed!", + "types": [ + "str", + "None" + ] + }, + "paused": { + "description": "The new state of the 'paused' var for this worker. When True, this worker will not be given any new requests.", + "types": [ + "bool", + "None" + ] + }, + "team": { + "description": "The new team of this worker.", + "types": [ + "str", + "None" + ] + } + }, + "NewsResponse": { + "root": { + "description": "The underlying list of newspieces.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.status.Newspiece]" + ] + } + }, + "FilterDetails": { + "description": { + "description": "Description about this regex.", + "types": [ + "str", + "None" + ] + }, + "filter_type": { + "description": "The type of this filter.", + "types": [ + "int" + ] + }, + "id_": { + "description": "The UUID of this filter.", + "types": [ + "str" + ] + }, + "regex": { + "description": "The regex for this filter.", + "types": [ + "str" + ] + }, + "replacement": { + "description": "The replacement string for this regex.", + "types": [ + "str", + "None" + ] + }, + "user": { + "description": "The moderator which added or last updated this regex.", + "types": [ + "str" + ] + } + }, + "RateResponse": { + "reward": { + "description": "The reward for the rating.", + "types": [ + "float" + ] + } + }, + "ExpiryStrSharedKeyDetailsResponse": { + "kudos": { + "description": "The Kudos limit assigned to this key.", + "types": [ + "int" + ] + }, + "name": { + "description": "The Shared Key Name.", + "types": [ + "str" + ] + }, + "max_image_pixels": { + "description": "The maximum amount of image pixels this key can generate per job. -1 means unlimited.", + "types": [ + "int" + ] + }, + "max_image_steps": { + "description": "The maximum amount of image steps this key can use per job. -1 means unlimited.", + "types": [ + "int" + ] + }, + "max_text_tokens": { + "description": "The maximum amount of text tokens this key can generate per job. -1 means unlimited.", + "types": [ + "int" + ] + }, + "sharedkey_id": { + "description": "The shared key ID to use for this request.", + "types": [ + "horde_sdk.ai_horde_api.fields.SharedKeyID" + ] + }, + "expiry": { + "description": "The expiry date of the shared key.", "types": [ "str", "None" ] }, - "r2_uploads": { - "description": "The r2 upload links for each this image. Each index matches the ID in self.ids", + "username": { + "description": "The owning user's unique Username. 
It is a combination of their chosen alias plus their ID.", "types": [ - "list[str]", - "None" + "str" ] }, - "ttl": { - "description": "The amount of seconds before this job is considered stale and aborted.", + "utilized": { + "description": "How much kudos has been utilized via this shared key until now.", "types": [ - "int", - "None" + "int" ] } }, - "JobSubmitResponse": { - "reward": { - "description": "The amount of kudos gained for submitting this request.", + "SharedKeyDeleteResponse": { + "message": { + "description": "A message from the API. This is typically an error or warning message, but may also be informational.", "types": [ - "float" + "str" ] } }, - "ImageStatsModelsResponse": { - "day": { - "description": "The stats for the past day.", - "types": [ - "dict[str, int]" - ] - }, - "month": { - "description": "The stats for the past month.", + "StyleStable": { + "name": { + "description": "The name of the style.", "types": [ - "dict[str, int]" + "str" ] }, - "total": { - "description": "The total stats for all time.", - "types": [ - "dict[str, int]" - ] - } - }, - "ImageStatsModelsTotalResponse": { - "day": { - "description": "The total stats for the past day.", + "info": { + "description": "Extra information or comments about this style provided by its creator.", "types": [ - "horde_sdk.ai_horde_api.apimodels._stats.SinglePeriodImgStat", + "str", "None" ] }, - "hour": { - "description": "The total stats for the past hour.", + "prompt": { + "description": "The prompt template which will be sent to generate an image.\n\nThe user's prompt will be injected into this. This argument MUST include a '{p}' which specifies the part where the user's prompt will be injected and an '{np}' where the user's negative prompt will be injected (if any)", "types": [ - "horde_sdk.ai_horde_api.apimodels._stats.SinglePeriodImgStat", - "None" + "str" ] }, - "minute": { - "description": "The total stats for the past minute.", + "public": { + "description": "When true this style will be listed among all styles publicly.\n\nWhen false, information about this style can only be seen by people who know its ID or name.", "types": [ - "horde_sdk.ai_horde_api.apimodels._stats.SinglePeriodImgStat", - "None" + "bool" ] }, - "month": { - "description": "The total stats for the past month.", + "nsfw": { + "description": "When true, it signified this style is expected to generate NSFW images primarily.", "types": [ - "horde_sdk.ai_horde_api.apimodels._stats.SinglePeriodImgStat", - "None" + "bool" ] }, - "total": { - "description": "The total stats for all time.", + "tags": { + "description": "Tags associated with this style.", "types": [ - "horde_sdk.ai_horde_api.apimodels._stats.SinglePeriodImgStat", + "list[str]", "None" ] - } - }, - "KudosTransferResponse": { - "transferred": { - "description": "The amount of Kudos transferred.", + }, + "models": { + "description": "The models which this style will attempt to use.", "types": [ - "float", + "list[str]", "None" ] - } - }, - "ListUsersDetailsResponse": { - "root": { - "description": "The underlying list of user details.", + }, + "id_": { + "description": "The UUID of the style. 
Use this to use the style or retrieve its information in the future.", "types": [ - "list[horde_sdk.ai_horde_api.apimodels._users.UserDetailsResponse]" + "str" ] - } - }, - "ModifyUserResponse": { - "admin_comment": { - "description": "Add further information about this user for the other admins.", + }, + "creator": { + "description": "The alias of the user which created this style.", "types": [ "str", "None" ] }, - "concurrency": { - "description": "The amount of concurrent request this user can have.", + "use_count": { + "description": "The amount of times this style has been used in generations.", "types": [ "int", "None" ] }, - "contact": { - "description": "Contact details for the horde admins to reach the user in case of emergency. This is only visible to horde\nmoderators.", + "sharedkey": { + "description": "The UUID of a shared key which will be used to fulfil this style when active.", "types": [ "str", "None" ] }, - "customizer": { - "description": "When set to true, the user will be able to serve custom Stable Diffusion models which do not exist in the\nOfficial AI Horde Model Reference.", + "params": { + "description": "The parameters to use for all generations using this style, if not set by the user.", "types": [ - "bool", + "horde_sdk.ai_horde_api.apimodels.styles.ModelStyleInputParamsStable", "None" ] }, - "education": { - "description": "When set to true, the user is considered an education account and some options become more restrictive.", + "examples": { + "description": "A list of examples of images generated by this style.", "types": [ - "bool", + "list[horde_sdk.ai_horde_api.apimodels.styles.StyleExample]", "None" ] }, - "filtered": { - "description": "When set to true, the replacement filter will always be applied against this user", + "shared_key": { + "description": "The shared key backing this style, if any.", "types": [ - "bool", + "horde_sdk.ai_horde_api.apimodels.sharedkeys.ExpiryStrSharedKeyDetailsResponse", "None" ] + } + }, + "StyleKobold": { + "name": { + "description": "The name of the style.", + "types": [ + "str" + ] }, - "flagged": { - "description": "When set to true, the user cannot transfer kudos and all their workers are put into permanent maintenance.", + "info": { + "description": "Extra information or comments about this style provided by its creator.", "types": [ - "bool", + "str", "None" ] }, - "moderator": { - "description": "Set to true to make this user a horde moderator.", + "prompt": { + "description": "The prompt template which will be sent to generate an image.\n\nThe user's prompt will be injected into this. 
This argument MUST include a '{p}' which specifies the part where the user's prompt will be injected and an '{np}' where the user's negative prompt will be injected (if any)", "types": [ - "bool", - "None" + "str" ] }, - "monthly_kudos": { - "description": "When specified, will start assigning the user monthly kudos, starting now!", + "public": { + "description": "When true this style will be listed among all styles publicly.\n\nWhen false, information about this style can only be seen by people who know its ID or name.", "types": [ - "int", - "None" + "bool" ] }, - "public_workers": { - "description": "Set to true to make this user display their worker IDs.", + "nsfw": { + "description": "When true, it signified this style is expected to generate NSFW images primarily.", "types": [ - "bool", - "None" + "bool" ] }, - "service": { - "description": "When set to true, the user is considered a service account proxying the requests for other users.", + "tags": { + "description": "Tags associated with this style.", "types": [ - "bool", + "list[str]", "None" ] }, - "special": { - "description": "When set to true, The user can send special payloads.", + "models": { + "description": "The models which this style will attempt to use.", "types": [ - "bool", + "list[str]", "None" ] }, - "trusted": { - "description": "When set to true,the user and their servers will not be affected by suspicion.", + "id_": { + "description": "The UUID of the style. Use this to use the style or retrieve its information in the future.", "types": [ - "bool", + "str" + ] + }, + "creator": { + "description": "The alias of the user which created this style.", + "types": [ + "str", "None" ] }, - "usage_multiplier": { - "description": "The amount by which to multiply the users kudos consumption.", + "use_count": { + "description": "The amount of times this style has been used in generations.", "types": [ - "float", + "int", "None" ] }, - "username": { - "description": "When specified, will change the username. No profanity allowed!", + "sharedkey": { + "description": "The UUID of a shared key which will be used to fulfil this style when active.", "types": [ "str", "None" ] }, - "vpn": { - "description": "When set to true, the user will be able to onboard workers behind a VPN. 
This should be used as a temporary\nsolution until the user is trusted.", + "params": { + "description": "The parameters to use for all generations using this style, if not set by the user.", "types": [ - "bool", + "horde_sdk.ai_horde_api.apimodels.styles.ModelStyleInputParamsKobold", "None" ] - }, - "worker_invited": { - "description": "Set to the amount of workers this user is allowed to join to the horde when in worker invite-only mode.", + } + }, + "TeamDetails": { + "name": { + "description": "The Name given to this team.", "types": [ - "int", + "str", "None" ] }, - "new_kudos": { - "description": "The new amount of kudos this user has.", + "id_": { + "description": "The UUID of this team.", "types": [ - "float", + "str", + "horde_sdk.ai_horde_api.fields.TeamID", "None" ] }, - "new_suspicion": { - "description": "The new amount of suspicion this user has.", + "info": { + "description": "Extra information or comments about this team provided by its owner.", "types": [ - "int", + "str", "None" ] - } - }, - "ModifyWorkerResponse": { - "info": { - "description": "The new state of the 'info' var for this worker.", + }, + "creator": { + "description": "The alias of the user which created this team.", "types": [ "str", "None" ] }, - "maintenance": { - "description": "The new state of the 'maintenance' var for this worker. When True, this worker will not pick up any new\nrequests.", + "kudos": { + "description": "How many Kudos the workers in this team have been rewarded while part of this team.", "types": [ - "bool", + "float", "None" ] }, - "name": { - "description": "The new name for this this worker. No profanity allowed!", + "models": { + "description": "The models that this team has run.", "types": [ - "str", + "list[horde_sdk.ai_horde_api.apimodels.base.ActiveModelLite]", "None" ] }, - "paused": { - "description": "The new state of the 'paused' var for this worker. 
When True, this worker will not be given any new requests.", + "requests_fulfilled": { + "description": "How many images this team's workers have generated.", "types": [ - "bool", + "int", "None" ] }, - "team": { - "description": "The new team of this worker.", + "uptime": { + "description": "The total amount of time workers have stayed online while on this team.", "types": [ - "str", + "int", "None" ] - } - }, - "NewsResponse": { - "root": { - "description": "The underlying list of newspieces.", + }, + "worker_count": { + "description": "How many workers have been dedicated to this team.", + "types": [ + "int", + "None" + ] + }, + "workers": { + "description": "The workers that have been dedicated to this team.", "types": [ - "list[horde_sdk.ai_horde_api.apimodels._status.Newspiece]" + "list[horde_sdk.ai_horde_api.apimodels.workers.workers.WorkerDetailLite]", + "None" ] } }, @@ -1008,7 +1732,7 @@ "type_": { "description": "The type of worker.", "types": [ - "horde_sdk.ai_horde_api.consts.WORKER_TYPE" + "horde_sdk.ai_horde_api.consts.AI_HORDE_WORKER_TYPES" ] }, "name": { @@ -1048,7 +1772,7 @@ "kudos_details": { "description": "How much Kudos this worker has accumulated or used for generating images.", "types": [ - "horde_sdk.ai_horde_api.apimodels.workers._workers.WorkerKudosDetails", + "horde_sdk.ai_horde_api.apimodels.workers.workers.WorkerKudosDetails", "None" ] }, @@ -1159,7 +1883,7 @@ "team": { "description": "The team this worker belongs to.", "types": [ - "horde_sdk.ai_horde_api.apimodels.workers._workers.TeamDetailsLite", + "horde_sdk.ai_horde_api.apimodels.workers.workers.TeamDetailsLite", "None" ] }, @@ -1252,6 +1976,42 @@ "bool", "None" ] + }, + "messages": { + "description": "The messages that have been sent to this worker.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.workers.messages.ResponseModelMessage]", + "None" + ] + } + }, + "StyleImageExampleModifyResponse": { + "warnings": { + "description": "A list of warnings from the API. This is typically an error or warning message,\nbut may also be informational.", + "types": [ + "list[horde_sdk.generic_api.apimodels.RequestSingleWarning]", + "None" + ] + }, + "message": { + "description": "A message from the API. This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + }, + "id_": { + "description": "The ID of the example.", + "types": [ + "str" + ] + } + }, + "StyleImageExampleDeleteResponse": { + "message": { + "description": "A message from the API. 
This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] } }, "TextGenerateAsyncDryRunResponse": { @@ -1272,7 +2032,7 @@ "id_": { "description": "The UUID for this job.", "types": [ - "horde_sdk.ai_horde_api.fields.JobID" + "horde_sdk.ai_horde_api.fields.GenerationID" ] }, "kudos": { @@ -1301,26 +2061,26 @@ "payload": { "description": "The settings for this text generation.", "types": [ - "horde_sdk.ai_horde_api.apimodels.generate.text._pop.ModelPayloadKobold" + "horde_sdk.ai_horde_api.apimodels.generate.text.pop.ModelPayloadKobold" ] }, "id_": { "description": "The UUID for this text generation.", "types": [ - "horde_sdk.ai_horde_api.fields.JobID", + "horde_sdk.ai_horde_api.fields.GenerationID", "None" ] }, "ids": { "description": "The UUIDs for this text generations.", "types": [ - "list[horde_sdk.ai_horde_api.fields.JobID]" + "list[horde_sdk.ai_horde_api.fields.GenerationID]" ] }, "skipped": { "description": "The skipped requests that were not valid for this worker.", "types": [ - "horde_sdk.ai_horde_api.apimodels.generate.text._pop.NoValidRequestFoundKobold" + "horde_sdk.ai_horde_api.apimodels.generate.text.pop.NoValidRequestFoundKobold" ] }, "softprompt": { @@ -1343,6 +2103,13 @@ "int", "None" ] + }, + "messages": { + "description": "The messages sent to this worker.", + "types": [ + "list[horde_sdk.ai_horde_api.apimodels.generate.pop.PopResponseModelMessage]", + "None" + ] } }, "TextStatsModelResponse": { @@ -1396,5 +2163,26 @@ "dict[str, int]" ] } + }, + "UpdateCollectionResponse": { + "warnings": { + "description": "A list of warnings from the API. This is typically an error or warning message,\nbut may also be informational.", + "types": [ + "list[horde_sdk.generic_api.apimodels.RequestSingleWarning]", + "None" + ] + }, + "message": { + "description": "A message from the API. This is typically an error or warning message, but may also be informational.", + "types": [ + "str" + ] + }, + "id_": { + "description": "The ID of the collection that was updated.", + "types": [ + "str" + ] + } } } diff --git a/docs/api_to_sdk_map.md b/docs/api_to_sdk_map.md deleted file mode 100644 index 0f5a5df5..00000000 --- a/docs/api_to_sdk_map.md +++ /dev/null @@ -1,83 +0,0 @@ -# AI-Horde API Model to SDK Class Map -This is a mapping of the AI-Horde API models (defined at [https://stablehorde.net/api/](https://stablehorde.net/api/), see also [the swagger doc](https://stablehorde.net/api/swagger.json)) to the SDK classes. 
- -## Payloads -| API Endpoint | HTTP Method | SDK Request Type | -| ------------ | ----------- | ---------------- | -| /v2/documents/privacy | GET | [AIHordeGetPrivacyPolicyRequest][horde_sdk.ai_horde_api.apimodels._documents.AIHordeGetPrivacyPolicyRequest] | -| /v2/documents/sponsors | GET | [AIHordeGetSponsorsRequest][horde_sdk.ai_horde_api.apimodels._documents.AIHordeGetSponsorsRequest] | -| /v2/documents/terms | GET | [AIHordeGetTermsRequest][horde_sdk.ai_horde_api.apimodels._documents.AIHordeGetTermsRequest] | -| /v2/find_user | GET | [FindUserRequest][horde_sdk.ai_horde_api.apimodels._find_user.FindUserRequest] | -| /v2/generate/async | POST | [ImageGenerateAsyncRequest][horde_sdk.ai_horde_api.apimodels.generate._async.ImageGenerateAsyncRequest] | -| /v2/generate/check/{id} | GET | [ImageGenerateCheckRequest][horde_sdk.ai_horde_api.apimodels.generate._check.ImageGenerateCheckRequest] | -| /v2/generate/pop | POST | [ImageGenerateJobPopRequest][horde_sdk.ai_horde_api.apimodels.generate._pop.ImageGenerateJobPopRequest] | -| /v2/generate/status/{id} | DELETE | [DeleteImageGenerateRequest][horde_sdk.ai_horde_api.apimodels.generate._status.DeleteImageGenerateRequest] | -| /v2/generate/status/{id} | GET | [ImageGenerateStatusRequest][horde_sdk.ai_horde_api.apimodels.generate._status.ImageGenerateStatusRequest] | -| /v2/generate/submit | POST | [ImageGenerationJobSubmitRequest][horde_sdk.ai_horde_api.apimodels.generate._submit.ImageGenerationJobSubmitRequest] | -| /v2/generate/text/async | POST | [TextGenerateAsyncRequest][horde_sdk.ai_horde_api.apimodels.generate.text._async.TextGenerateAsyncRequest] | -| /v2/generate/text/pop | POST | [TextGenerateJobPopRequest][horde_sdk.ai_horde_api.apimodels.generate.text._pop.TextGenerateJobPopRequest] | -| /v2/generate/text/status/{id} | DELETE | [DeleteTextGenerateRequest][horde_sdk.ai_horde_api.apimodels.generate.text._status.DeleteTextGenerateRequest] | -| /v2/generate/text/status/{id} | GET | [TextGenerateStatusRequest][horde_sdk.ai_horde_api.apimodels.generate.text._status.TextGenerateStatusRequest] | -| /v2/generate/text/submit | POST | [TextGenerationJobSubmitRequest][horde_sdk.ai_horde_api.apimodels.generate.text._submit.TextGenerationJobSubmitRequest] | -| /v2/interrogate/async | POST | [AlchemyAsyncRequest][horde_sdk.ai_horde_api.apimodels.alchemy._async.AlchemyAsyncRequest] | -| /v2/interrogate/pop | POST | [AlchemyPopRequest][horde_sdk.ai_horde_api.apimodels.alchemy._pop.AlchemyPopRequest] | -| /v2/interrogate/status/{id} | DELETE | [AlchemyDeleteRequest][horde_sdk.ai_horde_api.apimodels.alchemy._status.AlchemyDeleteRequest] | -| /v2/interrogate/status/{id} | GET | [AlchemyStatusRequest][horde_sdk.ai_horde_api.apimodels.alchemy._status.AlchemyStatusRequest] | -| /v2/interrogate/submit | POST | [AlchemyJobSubmitRequest][horde_sdk.ai_horde_api.apimodels.alchemy._submit.AlchemyJobSubmitRequest] | -| /v2/kudos/transfer | POST | [KudosTransferRequest][horde_sdk.ai_horde_api.apimodels._kudos.KudosTransferRequest] | -| /v2/stats/img/models | GET | [ImageStatsModelsRequest][horde_sdk.ai_horde_api.apimodels._stats.ImageStatsModelsRequest] | -| /v2/stats/img/totals | GET | [ImageStatsModelsTotalRequest][horde_sdk.ai_horde_api.apimodels._stats.ImageStatsModelsTotalRequest] | -| /v2/stats/text/models | GET | [TextStatsModelsRequest][horde_sdk.ai_horde_api.apimodels._stats.TextStatsModelsRequest] | -| /v2/stats/text/totals | GET | [TextStatsModelsTotalRequest][horde_sdk.ai_horde_api.apimodels._stats.TextStatsModelsTotalRequest] | -| 
/v2/status/heartbeat | GET | [AIHordeHeartbeatRequest][horde_sdk.ai_horde_api.apimodels._status.AIHordeHeartbeatRequest] | -| /v2/status/models | GET | [HordeStatusModelsAllRequest][horde_sdk.ai_horde_api.apimodels._status.HordeStatusModelsAllRequest] | -| /v2/status/models/{model_name} | GET | [HordeStatusModelsSingleRequest][horde_sdk.ai_horde_api.apimodels._status.HordeStatusModelsSingleRequest] | -| /v2/status/news | GET | [NewsRequest][horde_sdk.ai_horde_api.apimodels._status.NewsRequest] | -| /v2/status/performance | GET | [HordePerformanceRequest][horde_sdk.ai_horde_api.apimodels._status.HordePerformanceRequest] | -| /v2/users | GET | [ListUsersDetailsRequest][horde_sdk.ai_horde_api.apimodels._users.ListUsersDetailsRequest] | -| /v2/users/{user_id} | PUT | [ModifyUserRequest][horde_sdk.ai_horde_api.apimodels._users.ModifyUserRequest] | -| /v2/users/{user_id} | GET | [SingleUserDetailsRequest][horde_sdk.ai_horde_api.apimodels._users.SingleUserDetailsRequest] | -| /v2/workers | GET | [AllWorkersDetailsRequest][horde_sdk.ai_horde_api.apimodels.workers._workers.AllWorkersDetailsRequest] | -| /v2/workers/name/{worker_name} | GET | [SingleWorkerNameDetailsRequest][horde_sdk.ai_horde_api.apimodels.workers._workers.SingleWorkerNameDetailsRequest] | -| /v2/workers/{worker_id} | DELETE | [DeleteWorkerRequest][horde_sdk.ai_horde_api.apimodels.workers._workers.DeleteWorkerRequest] | -| /v2/workers/{worker_id} | PUT | [ModifyWorkerRequest][horde_sdk.ai_horde_api.apimodels.workers._workers.ModifyWorkerRequest] | -| /v2/workers/{worker_id} | GET | [SingleWorkerDetailsRequest][horde_sdk.ai_horde_api.apimodels.workers._workers.SingleWorkerDetailsRequest] | - - -## Responses -| API Endpoint | HTTP Status Code | SDK Response Type | -| ------------ | ----------- | ----------------- | -| /v2/documents/privacy | 200 | [HordeDocument][horde_sdk.ai_horde_api.apimodels._documents.HordeDocument] | -| /v2/documents/sponsors | 200 | [HordeDocument][horde_sdk.ai_horde_api.apimodels._documents.HordeDocument] | -| /v2/documents/terms | 200 | [HordeDocument][horde_sdk.ai_horde_api.apimodels._documents.HordeDocument] | -| /v2/find_user | 200 | [UserDetailsResponse][horde_sdk.ai_horde_api.apimodels._users.UserDetailsResponse] | -| /v2/generate/async | 200 | [ImageGenerateAsyncDryRunResponse][horde_sdk.ai_horde_api.apimodels.generate._async.ImageGenerateAsyncDryRunResponse] | -| /v2/generate/async | 202 | [ImageGenerateAsyncResponse][horde_sdk.ai_horde_api.apimodels.generate._async.ImageGenerateAsyncResponse] | -| /v2/generate/check/{id} | 200 | [ImageGenerateCheckResponse][horde_sdk.ai_horde_api.apimodels.generate._check.ImageGenerateCheckResponse] | -| /v2/generate/pop | 200 | [ImageGenerateJobPopResponse][horde_sdk.ai_horde_api.apimodels.generate._pop.ImageGenerateJobPopResponse] | -| /v2/generate/status/{id} | 200 | [ImageGenerateStatusResponse][horde_sdk.ai_horde_api.apimodels.generate._status.ImageGenerateStatusResponse] | -| /v2/generate/submit | 200 | [JobSubmitResponse][horde_sdk.ai_horde_api.apimodels.base.JobSubmitResponse] | -| /v2/generate/text/async | 200 | [TextGenerateAsyncDryRunResponse][horde_sdk.ai_horde_api.apimodels.generate.text._async.TextGenerateAsyncDryRunResponse] | -| /v2/generate/text/async | 202 | [TextGenerateAsyncResponse][horde_sdk.ai_horde_api.apimodels.generate.text._async.TextGenerateAsyncResponse] | -| /v2/generate/text/pop | 200 | [TextGenerateJobPopResponse][horde_sdk.ai_horde_api.apimodels.generate.text._pop.TextGenerateJobPopResponse] | -| /v2/generate/text/status/{id} | 200 | 
[TextGenerateStatusResponse][horde_sdk.ai_horde_api.apimodels.generate.text._status.TextGenerateStatusResponse] | -| /v2/generate/text/submit | 200 | [JobSubmitResponse][horde_sdk.ai_horde_api.apimodels.base.JobSubmitResponse] | -| /v2/interrogate/async | 202 | [AlchemyAsyncResponse][horde_sdk.ai_horde_api.apimodels.alchemy._async.AlchemyAsyncResponse] | -| /v2/interrogate/pop | 200 | [AlchemyPopResponse][horde_sdk.ai_horde_api.apimodels.alchemy._pop.AlchemyPopResponse] | -| /v2/interrogate/status/{id} | 200 | [AlchemyStatusResponse][horde_sdk.ai_horde_api.apimodels.alchemy._status.AlchemyStatusResponse] | -| /v2/interrogate/submit | 200 | [AlchemyJobSubmitResponse][horde_sdk.ai_horde_api.apimodels.alchemy._submit.AlchemyJobSubmitResponse] | -| /v2/kudos/transfer | 200 | [KudosTransferResponse][horde_sdk.ai_horde_api.apimodels._kudos.KudosTransferResponse] | -| /v2/stats/img/models | 200 | [ImageStatsModelsResponse][horde_sdk.ai_horde_api.apimodels._stats.ImageStatsModelsResponse] | -| /v2/stats/img/totals | 200 | [ImageStatsModelsTotalResponse][horde_sdk.ai_horde_api.apimodels._stats.ImageStatsModelsTotalResponse] | -| /v2/stats/text/models | 200 | [TextStatsModelResponse][horde_sdk.ai_horde_api.apimodels._stats.TextStatsModelResponse] | -| /v2/stats/text/totals | 200 | [TextStatsModelsTotalResponse][horde_sdk.ai_horde_api.apimodels._stats.TextStatsModelsTotalResponse] | -| /v2/status/heartbeat | 200 | [AIHordeHeartbeatResponse][horde_sdk.ai_horde_api.apimodels._status.AIHordeHeartbeatResponse] | -| /v2/status/models | 200 | [HordeStatusModelsAllResponse][horde_sdk.ai_horde_api.apimodels._status.HordeStatusModelsAllResponse] | -| /v2/status/models/{model_name} | 200 | [HordeStatusModelsSingleResponse][horde_sdk.ai_horde_api.apimodels._status.HordeStatusModelsSingleResponse] | -| /v2/status/news | 200 | [NewsResponse][horde_sdk.ai_horde_api.apimodels._status.NewsResponse] | -| /v2/status/performance | 200 | [HordePerformanceResponse][horde_sdk.ai_horde_api.apimodels._status.HordePerformanceResponse] | -| /v2/users | 200 | [ListUsersDetailsResponse][horde_sdk.ai_horde_api.apimodels._users.ListUsersDetailsResponse] | -| /v2/users/{user_id} | 200 | [UserDetailsResponse][horde_sdk.ai_horde_api.apimodels._users.UserDetailsResponse] | -| /v2/workers | 200 | [AllWorkersDetailsResponse][horde_sdk.ai_horde_api.apimodels.workers._workers.AllWorkersDetailsResponse] | -| /v2/workers/name/{worker_name} | 200 | [SingleWorkerDetailsResponse][horde_sdk.ai_horde_api.apimodels.workers._workers.SingleWorkerDetailsResponse] | -| /v2/workers/{worker_id} | 200 | [SingleWorkerDetailsResponse][horde_sdk.ai_horde_api.apimodels.workers._workers.SingleWorkerDetailsResponse] | diff --git a/docs/api_to_sdk_payload_map.json b/docs/api_to_sdk_payload_map.json deleted file mode 100644 index 2cfd2d4b..00000000 --- a/docs/api_to_sdk_payload_map.json +++ /dev/null @@ -1,104 +0,0 @@ -{ - "/v2/documents/privacy": { - "GET": "horde_sdk.ai_horde_api.apimodels._documents.AIHordeGetPrivacyPolicyRequest" - }, - "/v2/documents/sponsors": { - "GET": "horde_sdk.ai_horde_api.apimodels._documents.AIHordeGetSponsorsRequest" - }, - "/v2/documents/terms": { - "GET": "horde_sdk.ai_horde_api.apimodels._documents.AIHordeGetTermsRequest" - }, - "/v2/status/heartbeat": { - "GET": "horde_sdk.ai_horde_api.apimodels._status.AIHordeHeartbeatRequest" - }, - "/v2/interrogate/async": { - "POST": "horde_sdk.ai_horde_api.apimodels.alchemy._async.AlchemyAsyncRequest" - }, - "/v2/interrogate/status/{id}": { - "DELETE": 
"horde_sdk.ai_horde_api.apimodels.alchemy._status.AlchemyDeleteRequest", - "GET": "horde_sdk.ai_horde_api.apimodels.alchemy._status.AlchemyStatusRequest" - }, - "/v2/interrogate/submit": { - "POST": "horde_sdk.ai_horde_api.apimodels.alchemy._submit.AlchemyJobSubmitRequest" - }, - "/v2/interrogate/pop": { - "POST": "horde_sdk.ai_horde_api.apimodels.alchemy._pop.AlchemyPopRequest" - }, - "/v2/workers": { - "GET": "horde_sdk.ai_horde_api.apimodels.workers._workers.AllWorkersDetailsRequest" - }, - "/v2/generate/status/{id}": { - "DELETE": "horde_sdk.ai_horde_api.apimodels.generate._status.DeleteImageGenerateRequest", - "GET": "horde_sdk.ai_horde_api.apimodels.generate._status.ImageGenerateStatusRequest" - }, - "/v2/generate/text/status/{id}": { - "DELETE": "horde_sdk.ai_horde_api.apimodels.generate.text._status.DeleteTextGenerateRequest", - "GET": "horde_sdk.ai_horde_api.apimodels.generate.text._status.TextGenerateStatusRequest" - }, - "/v2/workers/{worker_id}": { - "DELETE": "horde_sdk.ai_horde_api.apimodels.workers._workers.DeleteWorkerRequest", - "PUT": "horde_sdk.ai_horde_api.apimodels.workers._workers.ModifyWorkerRequest", - "GET": "horde_sdk.ai_horde_api.apimodels.workers._workers.SingleWorkerDetailsRequest" - }, - "/v2/find_user": { - "GET": "horde_sdk.ai_horde_api.apimodels._find_user.FindUserRequest" - }, - "/v2/status/performance": { - "GET": "horde_sdk.ai_horde_api.apimodels._status.HordePerformanceRequest" - }, - "/v2/status/models": { - "GET": "horde_sdk.ai_horde_api.apimodels._status.HordeStatusModelsAllRequest" - }, - "/v2/status/models/{model_name}": { - "GET": "horde_sdk.ai_horde_api.apimodels._status.HordeStatusModelsSingleRequest" - }, - "/v2/generate/async": { - "POST": "horde_sdk.ai_horde_api.apimodels.generate._async.ImageGenerateAsyncRequest" - }, - "/v2/generate/check/{id}": { - "GET": "horde_sdk.ai_horde_api.apimodels.generate._check.ImageGenerateCheckRequest" - }, - "/v2/generate/pop": { - "POST": "horde_sdk.ai_horde_api.apimodels.generate._pop.ImageGenerateJobPopRequest" - }, - "/v2/generate/submit": { - "POST": "horde_sdk.ai_horde_api.apimodels.generate._submit.ImageGenerationJobSubmitRequest" - }, - "/v2/stats/img/models": { - "GET": "horde_sdk.ai_horde_api.apimodels._stats.ImageStatsModelsRequest" - }, - "/v2/stats/img/totals": { - "GET": "horde_sdk.ai_horde_api.apimodels._stats.ImageStatsModelsTotalRequest" - }, - "/v2/kudos/transfer": { - "POST": "horde_sdk.ai_horde_api.apimodels._kudos.KudosTransferRequest" - }, - "/v2/users": { - "GET": "horde_sdk.ai_horde_api.apimodels._users.ListUsersDetailsRequest" - }, - "/v2/users/{user_id}": { - "PUT": "horde_sdk.ai_horde_api.apimodels._users.ModifyUserRequest", - "GET": "horde_sdk.ai_horde_api.apimodels._users.SingleUserDetailsRequest" - }, - "/v2/status/news": { - "GET": "horde_sdk.ai_horde_api.apimodels._status.NewsRequest" - }, - "/v2/workers/name/{worker_name}": { - "GET": "horde_sdk.ai_horde_api.apimodels.workers._workers.SingleWorkerNameDetailsRequest" - }, - "/v2/generate/text/async": { - "POST": "horde_sdk.ai_horde_api.apimodels.generate.text._async.TextGenerateAsyncRequest" - }, - "/v2/generate/text/pop": { - "POST": "horde_sdk.ai_horde_api.apimodels.generate.text._pop.TextGenerateJobPopRequest" - }, - "/v2/generate/text/submit": { - "POST": "horde_sdk.ai_horde_api.apimodels.generate.text._submit.TextGenerationJobSubmitRequest" - }, - "/v2/stats/text/models": { - "GET": "horde_sdk.ai_horde_api.apimodels._stats.TextStatsModelsRequest" - }, - "/v2/stats/text/totals": { - "GET": 
"horde_sdk.ai_horde_api.apimodels._stats.TextStatsModelsTotalRequest" - } -} diff --git a/docs/api_to_sdk_response_map.json b/docs/api_to_sdk_response_map.json deleted file mode 100644 index 74b0ac58..00000000 --- a/docs/api_to_sdk_response_map.json +++ /dev/null @@ -1,100 +0,0 @@ -{ - "/v2/documents/privacy": { - "200": "horde_sdk.ai_horde_api.apimodels._documents.HordeDocument" - }, - "/v2/documents/sponsors": { - "200": "horde_sdk.ai_horde_api.apimodels._documents.HordeDocument" - }, - "/v2/documents/terms": { - "200": "horde_sdk.ai_horde_api.apimodels._documents.HordeDocument" - }, - "/v2/status/heartbeat": { - "200": "horde_sdk.ai_horde_api.apimodels._status.AIHordeHeartbeatResponse" - }, - "/v2/interrogate/async": { - "202": "horde_sdk.ai_horde_api.apimodels.alchemy._async.AlchemyAsyncResponse" - }, - "/v2/interrogate/status/{id}": { - "200": "horde_sdk.ai_horde_api.apimodels.alchemy._status.AlchemyStatusResponse" - }, - "/v2/interrogate/submit": { - "200": "horde_sdk.ai_horde_api.apimodels.alchemy._submit.AlchemyJobSubmitResponse" - }, - "/v2/interrogate/pop": { - "200": "horde_sdk.ai_horde_api.apimodels.alchemy._pop.AlchemyPopResponse" - }, - "/v2/workers": { - "200": "horde_sdk.ai_horde_api.apimodels.workers._workers.AllWorkersDetailsResponse" - }, - "/v2/generate/status/{id}": { - "200": "horde_sdk.ai_horde_api.apimodels.generate._status.ImageGenerateStatusResponse" - }, - "/v2/generate/text/status/{id}": { - "200": "horde_sdk.ai_horde_api.apimodels.generate.text._status.TextGenerateStatusResponse" - }, - "/v2/workers/{worker_id}": { - "200": "horde_sdk.ai_horde_api.apimodels.workers._workers.SingleWorkerDetailsResponse" - }, - "/v2/find_user": { - "200": "horde_sdk.ai_horde_api.apimodels._users.UserDetailsResponse" - }, - "/v2/status/performance": { - "200": "horde_sdk.ai_horde_api.apimodels._status.HordePerformanceResponse" - }, - "/v2/status/models": { - "200": "horde_sdk.ai_horde_api.apimodels._status.HordeStatusModelsAllResponse" - }, - "/v2/status/models/{model_name}": { - "200": "horde_sdk.ai_horde_api.apimodels._status.HordeStatusModelsSingleResponse" - }, - "/v2/generate/async": { - "200": "horde_sdk.ai_horde_api.apimodels.generate._async.ImageGenerateAsyncDryRunResponse", - "202": "horde_sdk.ai_horde_api.apimodels.generate._async.ImageGenerateAsyncResponse" - }, - "/v2/generate/check/{id}": { - "200": "horde_sdk.ai_horde_api.apimodels.generate._check.ImageGenerateCheckResponse" - }, - "/v2/generate/pop": { - "200": "horde_sdk.ai_horde_api.apimodels.generate._pop.ImageGenerateJobPopResponse" - }, - "/v2/generate/submit": { - "200": "horde_sdk.ai_horde_api.apimodels.base.JobSubmitResponse" - }, - "/v2/stats/img/models": { - "200": "horde_sdk.ai_horde_api.apimodels._stats.ImageStatsModelsResponse" - }, - "/v2/stats/img/totals": { - "200": "horde_sdk.ai_horde_api.apimodels._stats.ImageStatsModelsTotalResponse" - }, - "/v2/kudos/transfer": { - "200": "horde_sdk.ai_horde_api.apimodels._kudos.KudosTransferResponse" - }, - "/v2/users": { - "200": "horde_sdk.ai_horde_api.apimodels._users.ListUsersDetailsResponse" - }, - "/v2/users/{user_id}": { - "200": "horde_sdk.ai_horde_api.apimodels._users.UserDetailsResponse" - }, - "/v2/status/news": { - "200": "horde_sdk.ai_horde_api.apimodels._status.NewsResponse" - }, - "/v2/workers/name/{worker_name}": { - "200": "horde_sdk.ai_horde_api.apimodels.workers._workers.SingleWorkerDetailsResponse" - }, - "/v2/generate/text/async": { - "200": "horde_sdk.ai_horde_api.apimodels.generate.text._async.TextGenerateAsyncDryRunResponse", - "202": 
"horde_sdk.ai_horde_api.apimodels.generate.text._async.TextGenerateAsyncResponse" - }, - "/v2/generate/text/pop": { - "200": "horde_sdk.ai_horde_api.apimodels.generate.text._pop.TextGenerateJobPopResponse" - }, - "/v2/generate/text/submit": { - "200": "horde_sdk.ai_horde_api.apimodels.base.JobSubmitResponse" - }, - "/v2/stats/text/models": { - "200": "horde_sdk.ai_horde_api.apimodels._stats.TextStatsModelResponse" - }, - "/v2/stats/text/totals": { - "200": "horde_sdk.ai_horde_api.apimodels._stats.TextStatsModelsTotalResponse" - } -} diff --git a/docs/build_docs.py b/docs/build_docs.py index 406b6838..2df3736c 100644 --- a/docs/build_docs.py +++ b/docs/build_docs.py @@ -81,14 +81,14 @@ def api_to_sdk_map_create_markdown() -> None: api_to_sdk_payload_map: dict[str, dict[str, str]] = {} api_to_sdk_response_map: dict[str, dict[str, str]] = {} - with open("docs/api_to_sdk_payload_map.json") as f: + with open("docs/ai-horde/api_to_sdk_payload_map.json") as f: api_to_sdk_payload_map = json.load(f) - with open("docs/api_to_sdk_response_map.json") as f: + with open("docs/ai-horde/api_to_sdk_response_map.json") as f: api_to_sdk_response_map = json.load(f) # Write the mapping page with the API Endpoint sorted alphabetically - with open("docs/api_to_sdk_map.md", "w") as f: + with open("docs/ai-horde/api_to_sdk_map.md", "w") as f: f.write("# AI-Horde API Model to SDK Class Map\n") f.write("This is a mapping of the AI-Horde API models (defined at ") f.write("[https://stablehorde.net/api/](https://stablehorde.net/api/), see also ") diff --git a/docs/concepts/backend_values_mappers.md b/docs/concepts/backend_values_mappers.md new file mode 100644 index 00000000..d1ef6b76 --- /dev/null +++ b/docs/concepts/backend_values_mappers.md @@ -0,0 +1,206 @@ +# Design Document: Implementing a `BackendValuesMapper` + +## Overview + +The `BackendValuesMapper` is an abstract base class designed to map values between backend representations and SDK representations. This is useful when the backend uses different naming conventions or representations for the same concepts as the SDK. The `ImageBackendValuesMapper` is a concrete implementation of this concept for image-related backends, and it serves as a template for implementing other `BackendValuesMapper` subclasses for image generation backends. + +Mappers are used to automatically provide the correct values to backends when using SDK parameter sets. + +This document outlines the steps and considerations for implementing a new `BackendValuesMapper` for a specific backend type using the existing `ImageBackendValuesMapper` as a reference, but you should adapt your implementation to the specific needs of your backend. + +--- + +## Key Classes and Concepts + +### 1. **Base Class: `BackendValuesMapper`** + +- Abstract base class that provides the foundation for all backend mappers. +- Generic over the type of parameter set it maps (`SDKParameterSetTypeVar`). + +### 2. **Abstract Image Class: `ImageBackendValuesMapper`** + +- Extends `BackendValuesMapper` and specializes it for image-related backends. +- Generic over three types: + - `BackendSamplersTypeVar`: Enum for backend samplers. + - `BackendSchedulersTypeVar`: Enum for backend schedulers. + - `BackendControlnetsTypeVar`: Enum for backend controlnets. + +### 3. **Mapping Logic** + +- Maps backend values to SDK values and vice versa using dictionaries. +- Provides validation methods to check if a value is valid for a given type. 
+
+---
+
+## Steps to Implement a New `BackendValuesMapper`
+
+### Step 1: Define the Backend-Specific Enums
+
+Create enums for the backend-specific representations of the values you want to map. These enums should inherit from `StrEnum` to ensure compatibility with the existing mapping logic.
+
+```python
+from enum import auto
+from strenum import StrEnum
+
+class KNOWN_EXAMPLE_BACKEND_SAMPLERS(StrEnum):
+    sampler_a = auto()
+    sampler_b = "sampler_b_name_changed"  # Keep the enum field the same and update the value as a string if the backend changes
+
+class KNOWN_EXAMPLE_BACKEND_SCHEDULERS(StrEnum):
+    scheduler_x = auto()
+    scheduler_y = auto()
+
+class KNOWN_EXAMPLE_BACKEND_CONTROLNETS(StrEnum):
+    controlnet_1 = auto()
+    controlnet_2 = auto()
+```
+
+If your backend uses the SDK values (and they map 1:1), you should define the enums in the following way:
+
+```python
+from horde_sdk.generation_parameters.image.consts import KNOWN_IMAGE_SAMPLERS, KNOWN_IMAGE_SCHEDULERS, KNOWN_IMAGE_CONTROLNETS
+
+KNOWN_EXAMPLE_BACKEND_SAMPLERS = KNOWN_IMAGE_SAMPLERS
+KNOWN_EXAMPLE_BACKEND_SCHEDULERS = KNOWN_IMAGE_SCHEDULERS
+KNOWN_EXAMPLE_BACKEND_CONTROLNETS = KNOWN_IMAGE_CONTROLNETS
+```
+
+... effectively aliasing the SDK enums. This allows later changes to the SDK enums to be reflected while keeping the possibility of having backend-specific values later on.
+
+---
+
+### Step 2: Implement the Concrete Mapper Class
+
+Create a concrete implementation of `BackendValuesMapper`. This class should inherit from `BackendValuesMapper` and specialize it for the backend-specific enums.
+
+Be sure to define dictionaries that map backend values to SDK values and vice versa, preferably as members of your implementing class. These mappings should be comprehensive and account for all known values.
+
+> Note: The use of `ClassVar` is required if you make the mapping dictionaries class members.
You can alternatively define them as instance members in the `__init__` method, though that would be marginally less efficient + +```python +from typing_extensions import ClassVar +from horde_sdk.backend_parsing.object_models import ImageBackendValuesMapper + +class ExampleBackendValuesMapper( + ImageBackendValuesMapper[ + KNOWN_EXAMPLE_BACKEND_SAMPLERS, + KNOWN_EXAMPLE_BACKEND_SCHEDULERS, + KNOWN_EXAMPLE_BACKEND_CONTROLNETS, + ], +): + """Mapper for Custom Backend values.""" + + _EXAMPLE_BACKEND_SAMPLERS_CONVERT_MAP: ClassVar[dict[KNOWN_EXAMPLE_BACKEND_SAMPLERS | str, KNOWN_IMAGE_SAMPLERS]] = { + KNOWN_EXAMPLE_BACKEND_SAMPLERS.sampler_a: KNOWN_IMAGE_SAMPLERS.k_euler, + KNOWN_EXAMPLE_BACKEND_SAMPLERS.sampler_b: KNOWN_IMAGE_SAMPLERS.k_lms, + } + _EXAMPLE_BACKEND_SCHEDULERS_CONVERT_MAP: ClassVar[dict[KNOWN_EXAMPLE_BACKEND_SCHEDULERS | str, KNOWN_IMAGE_SCHEDULERS]] = { + KNOWN_EXAMPLE_BACKEND_SCHEDULERS.scheduler_x: KNOWN_IMAGE_SCHEDULERS.k_dpm_2, + KNOWN_EXAMPLE_BACKEND_SCHEDULERS.scheduler_y: KNOWN_IMAGE_SCHEDULERS.k_dpmpp_sde, + } + _EXAMPLE_BACKEND_CONTROLNETS_CONVERT_MAP: ClassVar[dict[KNOWN_EXAMPLE_BACKEND_CONTROLNETS | str, KNOWN_IMAGE_CONTROLNETS]] = { + KNOWN_EXAMPLE_BACKEND_CONTROLNETS.controlnet_1: KNOWN_IMAGE_CONTROLNETS.canny, + KNOWN_EXAMPLE_BACKEND_CONTROLNETS.controlnet_2: KNOWN_IMAGE_CONTROLNETS.depth, + } + + def __init__(self) -> None: + super().__init__( + backend_samplers_type=KNOWN_EXAMPLE_BACKEND_SAMPLERS, + backend_schedulers_type=KNOWN_EXAMPLE_BACKEND_SCHEDULERS, + backend_controlnets_type=KNOWN_EXAMPLE_BACKEND_CONTROLNETS, + sdk_samplers_map=self._EXAMPLE_BACKEND_SAMPLERS_CONVERT_MAP, + sdk_schedulers_map=self._EXAMPLE_BACKEND_SCHEDULERS_CONVERT_MAP, + sdk_controlnets_map=self._EXAMPLE_BACKEND_CONTROLNETS_CONVERT_MAP, + ) +``` + +--- + +### Step 3: Add Mapping Methods (if not already provided) + +If additional mapping logic is required, you should implement the necessary methods in your concrete mapper class. This may include methods for validating values, converting between backend and SDK representations, and any other specific logic required by your backend. + +`ImageBackendValuesMapper` already provides image generation-specific mappings, but you may need to implement additional methods for your backend. + +```python +def map_to_sdk_sampler(self, backend_sampler: KNOWN_EXAMPLE_BACKEND_SAMPLERS) -> KNOWN_IMAGE_SAMPLERS: + """Maps a backend sampler to an SDK sampler.""" + ... + +def map_to_example_backend_sampler(self, sdk_sampler: KNOWN_IMAGE_SAMPLERS) -> KNOWN_EXAMPLE_BACKEND_SAMPLERS: + """Maps an SDK sampler to a backend sampler.""" + ... + +... +``` + +#### Understanding `_map_value` and `_is_valid_value` + +The `BackendValuesMapper` base class provides two utility methods, `_map_value` and `_is_valid_value`, which can be leveraged when implementing your custom mapper. These functions help work the the `StrEnum` based types for the backend and SDK values. + +- **`_map_value`**: + - This method handles the conversion of a value from one representation to another using a mapping dictionary. + - It supports both `StrEnum` and `str` types for input and output, ensuring flexibility in mapping. + - If the mapping dictionary is empty, it defaults to converting the value based on the target type's members. + - It is a generic function and provides static (and runtime, where appropriate) type checking for the input and output types. 
+ - Example usage: + + ```python + def map_to_sdk_sampler(self, backend_sampler: BackendSamplersTypeVar | str) -> KNOWN_IMAGE_SAMPLERS: + return self._map_value( + value=backend_sampler, + mapping=self._to_sdk_sampler_map, + known_input_type=self._backend_samplers_type, + known_target_type=KNOWN_IMAGE_SAMPLERS, + ) + ``` + +- **`_is_valid_value`**: + - This method checks if a given value is valid for a specific `StrEnum` type. + - It validates both `StrEnum` and `str` inputs by checking against the enum's members and their values. + - Example usage: + + ```python + def is_valid_backend_sampler(self, backend_sampler: BackendSamplersTypeVar | str) -> bool: + return self._is_valid_value( + value=backend_sampler, + known_type=self._backend_samplers_type, + ) + ``` + +See the `ImageBackendValuesMapper` class for a complete example of how these methods are used. + +--- + +### Step 4: Write Unit Tests + +Ensure the mapper works as expected by writing unit tests for all mapping methods. + +```python +def test_map_to_sdk_sampler(): + mapper = CustomBackendValuesMapper() + assert mapper.map_to_sdk_sampler(KNOWN_EXAMPLE_BACKEND_SAMPLERS.sampler_a) == KNOWN_IMAGE_SAMPLERS.k_euler + +def test_map_to_EXAMPLE_BACKEND_sampler(): + mapper = CustomBackendValuesMapper() + assert mapper.map_to_EXAMPLE_BACKEND_sampler(KNOWN_IMAGE_SAMPLERS.k_euler) == KNOWN_EXAMPLE_BACKEND_SAMPLERS.sampler_a +``` + +--- + +## Example Usage + +```python +mapper = CustomBackendValuesMapper() + +# Map backend sampler to SDK sampler +sdk_sampler = mapper.map_to_sdk_sampler(KNOWN_EXAMPLE_BACKEND_SAMPLERS.sampler_a) + +# Map SDK sampler to backend sampler +backend_sampler = mapper.map_to_EXAMPLE_BACKEND_sampler(KNOWN_IMAGE_SAMPLERS.k_euler) + +# Validate backend sampler +is_valid = mapper.is_valid_EXAMPLE_BACKEND_sampler("sampler_a") +``` + +--- diff --git a/docs/concepts/client_design.md b/docs/concepts/client_design.md new file mode 100644 index 00000000..24cfdeef --- /dev/null +++ b/docs/concepts/client_design.md @@ -0,0 +1,60 @@ +# horde_sdk API Client design concepts + +## Generic API Client Class Hierarchy + +```mermaid + +classDiagram + class BaseHordeAPIClient { + <> + } + + class GenericHordeAPIManualClient + + class GenericAsyncHordeAPIManualClient + class GenericHordeAPISession + + class GenericAsyncHordeAPISession + + BaseHordeAPIClient <|-- GenericHordeAPIManualClient + BaseHordeAPIClient <|-- GenericAsyncHordeAPIManualClient + GenericHordeAPIManualClient <|-- GenericHordeAPISession + GenericAsyncHordeAPIManualClient <|-- GenericAsyncHordeAPISession + +``` + +## AI Horde API Client Class Hierarchy + +```mermaid +classDiagram + class GenericHordeAPIManualClient + class GenericAsyncHordeAPIManualClient + class GenericHordeAPISession + class GenericAsyncHordeAPISession + + class BaseAIHordeClient + class BaseAIHordeSimpleClient + + class AIHordeAPIManualClient + class AIHordeAPIAsyncManualClient + + class AIHordeAPIClientSession + class AIHordeAPIAsyncClientSession + + class AIHordeAPISimpleClient + class AIHordeAPIAsyncSimpleClient + + GenericHordeAPIManualClient <|-- AIHordeAPIManualClient + BaseAIHordeClient <|-- AIHordeAPIManualClient + + GenericAsyncHordeAPIManualClient <|-- AIHordeAPIAsyncManualClient + BaseAIHordeClient <|-- AIHordeAPIAsyncManualClient + + GenericHordeAPISession <|-- AIHordeAPIClientSession + GenericAsyncHordeAPISession <|-- AIHordeAPIAsyncClientSession + + BaseAIHordeSimpleClient <|-- AIHordeAPISimpleClient + BaseAIHordeSimpleClient <|-- AIHordeAPIAsyncSimpleClient + + AIHordeAPIAsyncSimpleClient 
o-- AIHordeAPIAsyncClientSession : _horde_client_session +``` diff --git a/docs/concepts/package_structure.md b/docs/concepts/package_structure.md new file mode 100644 index 00000000..cbd55fef --- /dev/null +++ b/docs/concepts/package_structure.md @@ -0,0 +1,155 @@ +# Package Structure + +```mermaid + graph TD + generation_parameters + generic_api + deploy_config --> generation_parameters + ai_horde_api --> generation_parameters + ai_horde_api --> generic_api + worker --> ai_horde_api + worker --> generic_api + worker --> generation_parameters + worker --> deploy_config +``` + +## API Support Packages + +```mermaid + graph TD + generic_api + generation_parameters + + + ai_horde_api + + generic_api --> ai_horde_api + generation_parameters --> ai_horde_api + + ai_horde_api --> apimodels + ai_horde_api --> clients + ai_horde_api --> consts + ai_horde_api --> exceptions + ai_horde_api --> endpoints + ai_horde_api --> metadata + ai_horde_api --> utils +``` + +API support packages do the following: + +- Implement one or more client types from `horde_sdk.generic_api.generic_clients`. + - This requires extending, as needed, the following classes from `horde_sdk.generic_api.metadata`: + - `GenericHeaderFields` + - `GenericAcceptTypes` + - `GenericPathFields` + - `GenericQueryFields` + - Adding values to these fields implies that python objects with fields of the same name are *always* passed to the API this way. + - e.g., adding a field named `api_key` to `GenericHeaderFields` implies that any request using that client with a field named `api_key` in its definition will always be passed as a header field named `api_key`. + - These are passed to client class constructors so the underlying shared client logic can handle the specific fields and headers for the API. + - `GENERIC_API_ENDPOINT_SUBPATH` from `horde_sdk.generic_api.endpoints` is used to determine url paths for the API. It must be extended to include all addressable endpoints for that API. See `horde_sdk.ai_horde_api.endpoints` for an example. +- API models for requests (including payloads and parameters) and responses +- Constants for API endpoints and other configurations + - Some examples include the valid values for parameters which accept only certain strings. + - This also includes consts for default timeout values, default anonymous API keys, etc. + +```bash +ai_horde_api/ +├── apimodels/ +├──── __init__.py +├──── ... (other API model files) +├── __init__.py +├── ai_horde_clients.py +├── consts.py +├── exceptions.py +├── endpoints.py +├── metadata.py +├── utils.py +├── ... (other files) +``` + +By convention, the following files within an api support package should always contain certain types of content: + +- `__init__.py` (at the root of the api support package) + - Must import **all** client classes, exceptions and members of the endpoint module. + - If there are few API models and that is reasonably expected to always be the case, they may also be imported here. +- `apimodels/` or `apimodels.py` + - Contains API models for requests and responses, as well as any other data structures used in that API's interactions. + - Contrast this with the classes found in the `horde_sdk.generation_parameters` module which contain data structures that may be used outside the context of a specific API (such as locally using a backend). + - `apimodels/__init__.py` or `apimodels.py` + - Must import **all** API models from the `apimodels/` and must be included in the `__all__` module variable. 
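+    - A minimal sketch of the re-export pattern this implies (the imported names here are just examples from the existing API models, not a complete list):
+
+      ```python
+      # apimodels/__init__.py (illustrative excerpt)
+      from horde_sdk.ai_horde_api.apimodels._status import (
+          AIHordeHeartbeatRequest,
+          AIHordeHeartbeatResponse,
+      )
+
+      __all__ = [
+          "AIHordeHeartbeatRequest",
+          "AIHordeHeartbeatResponse",
+      ]
+      ```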
+- `*_clients.py` or `clients/` + - Contains client classes that implement the API interactions using the models defined in `apimodels/`. + - Must import **all** client classes from the `clients/` and must be included in the `__all__` module variable. +- `consts.py` + - Contains constants related to the API, such as endpoint URLs, default values, and other configuration settings. + - Must import **all** constants from the `consts.py` and must be included in the `__all__` module variable. +- `exceptions.py` + - Contains custom exceptions related to the API interactions. + - Must import **all** exceptions from the `exceptions.py` and must be included in the `__all__` module variable. +- `endpoints.py` + - Contains the endpoint subpaths for the API, which are used to construct full URLs for API requests. +- `metadata.py` + - Contains metadata classes that define the header fields, query parameters, and other metadata used in API requests. +- `utils.py` + - Contains utility functions related to the API interactions, such as helper functions for constructing requests or processing responses. + - Consider adding a `utils/` directory if there are numerous utility functions. + - Also consider adding generic (non-api-specific) utility functions to `horde_sdk.generic_api.utils` if they are applicable to multiple APIs or `horde_sdk.utils` if they have broad applicability. + +## Generation Parameters + +```mermaid + graph TD + generation_parameters + deploy_config --> generation_parameters + ai_horde_api --> generation_parameters + worker --> ai_horde_api + worker --> generation_parameters +``` + +- `generation_parameters/__init__.py` + - Must import **all** generation parameter classes as well as related constants/enums and must be included in the `__all__` module variable. +- `generation_parameters/generic/` + - Contains the generic base class `GenerationFeatureFlags` which serves as the base class for all generation feature flags. +- `generation_parameters/{generation_type}/__init__.py` + - Must import **all** generation parameter classes for that specific generation type as well as related constants/enums and must be included in the `__all__` module variable. +- `generation_parameters/{generation_type}/object_models.py` or `generation_parameters/{generation_type}/object_models/*` + - Contains data structures representing parameters for generation that are not specific to any one API. + - All top level classes (and classes which they contain) should always inherit from an appropriate base class in `horde_sdk.generation_parameters.generic.object_models`. + - There should always be at least one concrete `ComposedParameterSetBase` and at least one corresponding concrete `GenerationFeatureFlags`. +- `generation_parameters/{generation_type}/consts.py` + - Contains constants related to the generation parameters for that specific generation type, such as default values, valid options, and other configuration settings. + - Consider if these constants are applicable to multiple generation types and if so, place them in `horde_sdk.generation_parameters.generic.consts.py` instead. + +## Worker + +```mermaid + graph TD + worker + deploy_config --> worker + ai_horde_api --> worker + generic_api --> worker + generation_parameters --> worker +``` + +- `worker/__init__.py` + - Must import **all** generic and concrete generation classes + - Must import **all** generic and concrete job classes +- `worker/consts.py` + - Contains constants specific to workers, generations, or jobs. 
This includes default values, valid options, and other configuration settings. + - Consider if these constants are applicable to the API logic and if so, place them in `horde_sdk.generic_api.consts.py`. Also consider if they are broadly applicable and place them in `horde_sdk.consts.py` in that case. +- `worker/feature_flags.py` + - Contains the generic class `WorkerFeatureFlags` and concrete implementations for each supported worker type. + - This class is used to determine if given `GenerationFeatureFlags` are supported by the worker. +- `worker/dispatch/` + - Logic for interacting with worker APIs or other dispatch systems. + - This may include logic for dispatching jobs to workers, converting remote API response to SDK objects, and other related tasks. +- `worker/generations_base.py` + - Contains the base class for all generation types, `HordeSingleGeneration` + - This class is a state machine for the generation process and *reflects* the state of a generation rather than *managing* the state of a generation. + - However, it does enforce certain constraints on state transitions and other generation-level validations. +- `worker/generations.py` + - Contains the concrete implementations of `HordeSingleGeneration` for each supported generation type. + - These classes should generally be limited in terms of their own logic and when appropriate, delegate to the `HordeSingleGeneration` class's methods or implementations. +- `worker/jobs_base.py` + - Contains the base class for all job types, `HordeWorkerJob` + - `HordeWorkerJob` wraps `HordeSingleGeneration` diff --git a/docs/concepts/style_guide.md b/docs/concepts/style_guide.md new file mode 100644 index 00000000..a7aed285 --- /dev/null +++ b/docs/concepts/style_guide.md @@ -0,0 +1,509 @@ + +# Code Philosophy and Design Constraints + +## Table of Contents + +- [Code Philosophy and Design Constraints](#code-philosophy-and-design-constraints) + - [Table of Contents](#table-of-contents) + - [Too long; didn't read](#too-long-didnt-read) + - [General Principles](#general-principles) + - [Naming Conventions](#naming-conventions) + - [Module and Package Naming](#module-and-package-naming) + - [Variable, Function, Method and Class Naming](#variable-function-method-and-class-naming) + - [Class Prefixes, Suffixes, and other Naming Conventions](#class-prefixes-suffixes-and-other-naming-conventions) + - [General](#general) + - [API/Client Specific](#apiclient-specific) + - [Generation/Inference Specific](#generationinference-specific) + - [Error Handling](#error-handling) + - [Documentation](#documentation) + - [API Model Specific Documentation](#api-model-specific-documentation) + - [Function and Method Signatures](#function-and-method-signatures) + - [Object-Oriented Design](#object-oriented-design) + - [Method Overloading and Return Types](#method-overloading-and-return-types) + - [Control Flow and Readability](#control-flow-and-readability) + - [Data Structures, Models, and Constants](#data-structures-models-and-constants) + - ["KNOWN" Constants](#known-constants) + - [Imports and Module Export](#imports-and-module-export) + - [Pydantic BaseModel Usage](#pydantic-basemodel-usage) + - [API Model Verification](#api-model-verification) + +## Too long; didn't read + +If this is your first time contributing, consider this a document to intermittently reference as you work on the codebase rather than a document to memorize. 
Many of the guidelines are enforced by linting or testing tools, and many of the other rules can be followed by matching the patterns already present in the codebase.
+
+In brief:
+
+- Descriptive, unambiguous naming is required; avoid abbreviations and acronyms unless they are widely understood.
+- Never silently handle exceptions; always log or re-raise, and avoid blanket or bare excepts.
+- All public APIs must have Google-style docstrings.
+- Readability first: prefer guard clauses, clear control flow, and meaningfully named boolean expressions; avoid deeply nested structures.
+- Type hints are mandatory for all public functions, methods, class attributes, and module-level variables.
+- Code should be written in a way that can be statically analyzed and linted.
+  - Avoid (whenever possible) magic strings/numbers and direct dictionary access by key literals.
+  - Prefer classes over dictionaries/tuples for data structures.
+  - Use `Enum`/`StrEnum` for fixed sets of values.
+
+The principles described in this document are prescriptive and highly opinionated. They exist for the sake of consistency and to improve maintainability. These standards do not purport to be the "best" way and they are not an attack on alternative approaches. Change proposals to this document are welcome if you feel that something is overly restrictive, missing, or could be improved.
+
+## General Principles
+
+- [PEP 20](https://peps.python.org/pep-0020/) should guide code design and implementation.
+  - In cases not explicitly covered by these guidelines, the spirit of the principles outlined in PEP 20 should be followed.
+- **All** function arguments and return values, class attributes and fields, and module-level variables must be type hinted.
+  - See the [mypy type hint cheat sheet](https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html) for a primer.
+  - Local variables generally only need a type hint if mypy reports an error or warning about them, but it is good practice to type hint them as well.
+  - Use Python 3.10+ union types (e.g., `int | str`) instead of `typing.Union[int, str]` and use `| None` for optional types (e.g., `int | None`).
+
+## Naming Conventions
+
+### Module and Package Naming
+
+- Modules and packages should be named using lower snake_case, and should always include an underscore between each significant word or abbreviation.
+  - Example module names:
+    - `ai_horde_api`
+    - `generic_api`
+    - `generation_parameters`
+- Modules or packages must never be named using a python builtin library name, nor should they use the name of a popular third-party library.
+  - Example **banned** module names:
+    - `logging`
+    - `json`
+    - `requests`
+
+### Variable, Function, Method and Class Naming
+
+- snake_case must be used for variables, fields, functions, and methods.
+  - An underscore must appear between each significant word or abbreviation.
+- CamelCase should be used for classes.
+- ALL_CAPS_WITH_UNDERSCORES should be used for constants.
+- Private variables and methods should be prefixed with a single underscore (e.g., `_private_variable`).
+- **Names must be descriptive**
+  - This is a firm requirement. Names should be descriptive enough that there is very little room for ambiguity, especially in context or with minimal explanation. This is not always possible to get completely right, but it must be clear that the developer made an effort to do so.
+ - One/two/three letter variable names must be avoided unless: + - They are in a very small scope (e.g., `i` for a loop) + - In a mathematical context (e.g., `x`, `y`, `z`) where the variables have no significant meaning or are known by convention (the constant `e` for Euler's number). + - The variable is named this way in an external module or library and the name is being preserved for consistency and clarity. + - Avoid abbreviations unless they are **widely** understood (e.g., `url`, `api`, `id`, etc.) **and** significantly improve readability + - For example `img`, `num`, `cnt`, `val`, are not considerably shorter. Writing these out fully improves readability in these cases. + - "Widely" does not mean "common in some big codebase" or "common in a narrow field". It does mean "common enough that most developers will understand it without needing to look it up". + - Some acceptable abbreviations include `id`, `db`, `param`, `anon`, and `obj`. + - Acronyms similarly should be avoided unless they are widely understood and significantly improve readability. + - For example, `HTTP`, `URL`, `API`, `JSON`, `XML`, `HTML` are acceptable acronyms. + - Avoid using acronyms that are specific to a particular domain, field, or python library. + - Acronyms of variables or concepts that appear in the codebase are also not acceptable. + + ```python + # Bad + hr = HordeRequest(...) + + # Good + horde_request = HordeRequest(...) + ``` + + - This avoids ambiguity (consider if the above function also included a `HordeResponse`) and improves readability. + +### Class Prefixes, Suffixes, and other Naming Conventions + +Many classes contain standardized prefixes, suffixes, or identifiers within their names to indicate their purpose or behavior. These conventions help developers quickly understand the role of a class within the SDK. + +#### General + +- **Generic** (Always a Prefix): Refers to a class that is not specific to a particular API implementation. It serves as a base or abstract class that can be extended for different API clients or may be general-purpose. + - Examples: `GenericHordeAPIManualClient`, `GenericAsyncHordeAPIManualClient` +- **Base** (Always a Prefix): Indicates a foundational class that provides core functionality and may be abstract or partially implemented. It is intended to be extended by more specific classes. + - Examples: `BaseAIHordeClient`, `BaseAIHordeSimpleClient` + +#### API/Client Specific + +- **Manual**: Refers to classes that do not have context management or automatic session handling. These classes require the user to clean up server resources manually. + - Examples: `AIHordeAPIManualClient`, `GenericHordeAPIManualClient` +- **Async**: Refers to classes that support asynchronous operations, allowing for non-blocking API calls and improved performance in concurrent environments. Generally, these classes will use `async` method definitions but may support synchronous operations as well. + - Examples: `AIHordeAPIAsyncManualClient`, `GenericAsyncHordeAPIManualClient` +- **HordeAPI**: Indicates a class that is specifically designed for a Horde-style API. This *does not* imply that the class is specific to the AI Horde API, but rather that it is designed to work with any API that follows the Horde-style conventions. + - Examples: `GenericHordeAPIManualClient`, `GenericAsyncHordeAPIManualClient` +- **AIHorde**: Refers to classes that are specifically designed for the AI Horde API. These classes are tailored to the specific requirements and features of the AI Horde API. 
+ - Examples: `AIHordeAPIManualClient`, `AIHordeAPIAsyncManualClient` +- **Session** (Always a suffix): Indicates a class that manages a session with the API, handling authentication, connection management, and request/response handling. These classes should provide context management support (i.e., they should implement the `__enter__` and `__exit__` or `__aenter__` and `__aexit__` methods) to ensure proper cleanup of resources. + - Examples: `GenericHordeAPISession`, `GenericAsyncHordeAPISession` + +#### Generation/Inference Specific + +- **FeatureFlags**: Refers to classes which describe what features are required by a generation *or* what features are supported by a worker/backend. + - Example: `ImageGenerationFeatureFlags` +- **GenerationParameters**/**Parameters**: Refers to classes which describe the parameters required for a generation request. + - Example: `ImageGenerationParameters` +- **ParametersTemplate**: Refers to classes which describe the parameters required for a generation request, but have the property that **all fields are optional**. These classes are intended to be used as templates during the construction of certain other feature, such as user styles and chaining. + - Example: `ImageGenerationParametersTemplate` +- **Basic**: Classes which have the parameters shared across all (or virtually all) generation requests of that kind. Images, for example, universally have a width and height, so `BasicImageGenerationParameters` would be a class that contains those fields. + - Example: `BasicImageGenerationParameters` +- **KNOWN_**: `Enums` or `StrEnums` which describe a set of known values for a particular field or parameter. These are intended to be used as a way to validate input and provide a clear set of options for consumers. However, by convention, these are not *required* to be used, and consumers are free to use any valid value for the field or parameter as long as its type is correct. + - Example: `KNOWN_IMAGE_SAMPLERS`, `KNOWN_AUX_MODEL_SOURCE` + +## Error Handling + +- Never silently handle exceptions. Always log and/or re-raise them. +- Exceptions should be used for exceptional and unhandled cases, not for control flow. + - Exceptions should be raised for errors that are not expected to occur during normal operation of the program +- Bare `except:` statements are considered evil. + - This is because they catch all exceptions, including system-exiting exceptions like `KeyboardInterrupt` and `SystemExit`, which can lead to unexpected behavior and make debugging difficult. +- Do not blanket catch exceptions (e.g., `except Exception as e:`) unless you have a very good reason to do so. + - If you have a blanket catch, you should instead consider making the excepts opt-in (where the default is to `raise e`). + - This does not apply where you are cleaning up resources or performing finalization tasks (e.g., closing files, releasing locks, etc.) where you may need to catch multiple exceptions (such as in a `__exit__` method). + - If you must catch an exception, catch only the specific exception(s) that you expect to occur and can handle appropriately. + +## Documentation + +- All public modules, classes, methods, variables and fields must be documented with docstrings. + - Docstrings should be written in [google style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings). + - The first line of the docstring should be in the imperative mood and should be a brief summary of the method's purpose. 
+ - Avoid merely restating the method name unless the method is narrow or trivial in scope. + - Methods marked with `@override` do not need a docstring because they inherit the docstring from the parent class. However, if the method's behavior is significantly different from the parent class, it should be documented with a docstring. + - However, if this is the case, it should be considered a code smell and the method or parent method should be refactored to avoid the need for the overriding behavior to be so different. + - Methods which perform validation of inputs (and therefore raise value or type errors) or have predictable failure modes should document these exceptions with a `Raises` section in the docstring. + - For example, methods which are known to rely on external resources (e.g., network calls, file I/O) should document the exceptions that may be raised in the event of a failure. + - It is **not** expected that every conceivable exception raised from a particular call hierarchy is documented, but rather those that are most likely to occur and can be reasonably anticipated. + - A method that opens a file should document `FileNotFoundError` and `IOError` in its `Raises` section, while network-related methods should document `ConnectionError`, `TimeoutError`, and other relevant exceptions. + +### API Model Specific Documentation + +Many docstrings in the SDK have additional requirements when they are related to API models or requests/responses. While these rules would be difficult to remember, they are luckily enforced by CI and the `horde_sdk.meta` module has helper functions to assist in generating the required docstrings. The correct docstrings will also be emitted by the `object_verify` tests. Be sure to run the tests with `-s` to see the output. + +- Children classes of `HordeAPIObject`, `HordeAPIData` which have a named model described in the API docs must have a docstring whose final line looks like this (for a model with the name `HordePerformance` on the v2 API): + + ```python + """... + + v2 API Model: `HordePerformance` + """ + ``` + + Where `HordePerformance` is the name of the model as described in the API docs. It must be followed by a carriage return. + +- Children classes of `HordeResponse` must have additional specific information, for example: + + When the response is returned from **one** endpoint: + + ```python + class HordePerformanceResponse(HordeResponseBaseModel): + """Information about the performance of the horde, such as worker counts and queue sizes. + + Represents the data returned from the /v2/status/performance endpoint with http status code 200. + + v2 API Model: `HordePerformance` + """ + ``` + + Where `/v2/status/performance` is the endpoint that returns the performance information. This must match the corresponding request class's `get_api_endpoint_subpath()` and `get_success_status_response_pairs()` values. + + When the response can be returned from **multiple** endpoints: + + ```python + class ResponseModelCollection(HordeResponseBaseModel): + """A collection of styles. + + Represents the data returned from the following endpoints and http status codes: + - /v2/collection_by_name/{collection_name} | CollectionByNameRequest [GET] -> 200 + - /v2/collections/{collection_id} | CollectionByIDRequest [GET] -> 200 + + v2 API Model: `ResponseModelCollection` + """ + ``` + + Where `/v2/collection_by_name/{collection_name}` and `/v2/collections/{collection_id}` are the endpoints that return the collection information. 
This must match all of the corresponding request classes' `get_api_endpoint_subpath()` and `get_success_status_response_pairs()` values.
+
+- Children classes of `HordeRequest` must have additional specific information, for example:
+
+    ```python
+    class HordePerformanceRequest(BaseAIHordeRequest):
+        """Request performance information about the horde, such as worker counts and queue sizes.
+
+        Represents a GET request to the /v2/status/performance endpoint.
+        """
+    ```
+
+- If the API does not name the model, `get_api_model_name(...)` must be overloaded to return `horde_sdk.consts._ANONYMOUS_MODEL` and the docstring should be:
+
+    ```python
+    """...
+
+    v2 API Model: `_ANONYMOUS_MODEL`
+    """
+    ```
+
+- If the model is overloaded (has two or more conflicting representations), `get_api_model_name(...)` must be overloaded to return `horde_sdk.consts._OVERLOADED_MODEL` and the docstring should be:
+
+    ```python
+    """...
+
+    v2 API Model: `_OVERLOADED_MODEL`
+    """
+    ```
+
+## Function and Method Signatures
+
+- Class and method signatures should prefer keyword-only arguments, especially when there are multiple arguments of the same type and/or when they are adjacent to each other.
+  - This improves readability and reduces the chance of passing arguments in the wrong order. Additionally, it makes it easier to add new arguments in the future without breaking existing code.
+  - For example:
+
+    ```python
+    # Avoid this
+    def create_user(name: str, age: int, username: str, email: str):
+        ...
+
+    create_user("John Doe", 30, "johndoe", "john.doe@example.com")
+
+
+    # Do this instead
+    def create_user(*, name: str, age: int, username: str, email: str):
+        ...
+
+    create_user(name="John Doe", age=30, username="johndoe", email="john.doe@example.com")
+    ```
+
+## Object-Oriented Design
+
+- Many object-oriented programming (OOP) principles are used, especially in class design, but python's dynamic nature should be embraced when doing so would be pythonic (widely understood, idiomatic python code).
+  - Use inheritance when an object "is a" relationship exists.
+  - Try to use composition over inheritance when an object "has a" relationship exists.
+  - Use `@override` when overriding methods in a subclass.
+
+## Method Overloading and Return Types
+
+- Avoid surprisingly overloaded methods: methods with ambiguous or unexpected overloading.
+  - For example, if a method is overloaded to accept either a single object or a list of objects, it should be clear from the method name and documentation that this is the case.
+  - It would be better, however, to create two separate methods instead.
+- Methods should return a predictable type.
+  - This is technically enforced by type hints, but it is still important to consider the implications of multiple types, especially from the perspective of a consumer.
+  - Avoid returning `None` as a catch-all indication of failure. Either raise an exception or return a more specific value indicating the failure.
+    - This is especially true if you are returning `None` for multiple *kinds* of failure, "not found" errors, or timeouts.
+    - Of course, `None`/`null` has a widely understood meaning ('missing', 'unset') in most programming languages, and can be used whenever source data represents it this way or to indicate `None`'s usual meaning; just avoid overloading its meaning.
+  - If possible, accept abstract types (e.g., `Iterable`, `Mapping`) instead of concrete types (e.g., `list`, `dict`) to allow for more flexibility in what callers can pass.
+
+      ```python
+      # Avoid this
+      def mutate_items(items: list[Item]) -> list[Item]:
+          ...
+
+      # Do this instead
+      def mutate_items(items: Iterable[Item]) -> Iterable[Item]:
+          ...
+      ```
+
+    - However, if appropriate, make the function generic so the type hinting is preserved.
+
+      ```python
+      from typing import Any, Iterable, TypeVar
+
+      T = TypeVar("T", bound=Iterable[Any])
+
+      def mutate_items(items: T) -> T:
+          ...
+      ```
+
+      - This allows the consumer to use any iterable type (e.g., `list`, `set`, `tuple`) and still have the type hints preserved.
+  - Type hint concrete types for return values to ensure that the consumer knows what to expect.
+    - Unless
+      - ... the method is specifically designed to return an abstract type
+      - ... is an abstract superclass **and** a more specific type is not required
+    - For example, if a method constructs and returns a list, it should be type hinted as `list` and not `Iterable`.
+
+      ```python
+      # Avoid this
+      def get_items(self) -> Iterable[Item]:
+          return [Item(), Item()]
+
+      # Do this instead
+      def get_items(self) -> list[Item]:
+          return [Item(), Item()]
+      ```
+
+  - The use of `Any` should be extremely judicious and should not be used when a more specific type can be used.
+    - Careful consideration should be given to whether or not a consumer *might* care about the type of the return value.
+    - In the SDK, a good example of good `Any` usage is type hinting `HordeSingleGeneration[Any]`, where the generic parameter represents the resulting types from the generation (e.g., `str` for text, `bytes` for images, etc.). This pattern allows accurately typing `HordeSingleGeneration` when working with arbitrary generations in contexts where the resulting type is not important, for example, in high-level generic worker classes.
+    - With very few exceptions, `Any` should not be used as a return type hint *unless* the object in question can have its type inferred by some other, obvious, means or in a more appropriate context.
+  - Generally, for methods which mutate or return a different type based on input, it should be clear from the method name and documentation that this is the case and which types can be expected based on different inputs.
+    - For example, the following method signature should be considered bad practice:
+
+      ```python
+      # Bad
+      def get_items(self, as_list: bool = True) -> list[Item] | set[Item]:
+          ...
+
+      # Good
+      def get_items(self) -> list[Item]:
+          ...
+      def get_unique_items(self) -> set[Item]:
+          ...
+      ```
+
+  - Methods which *can* return a list or a container should *always* return a list or container, even if it is a single item.
+    - For example, the following method signature should be considered bad practice:
+
+      ```python
+      # Bad
+      def get_items(self) -> list[Item] | Item:
+          ...
+
+      # Good
+      def get_items(self) -> list[Item]:
+          ...
+      ```
+
+  - Methods should avoid returning different container types *unless that is the purpose of the method*.
+    - For example, the following method signatures should be considered bad practice:
+
+      ```python
+      # Bad
+      def get_items(self) -> list[Item] | set[Item]:
+          ...
+
+      # Bad
+      def get_items(self, as_dict: bool = False) -> list[Item] | dict[str, Item]:
+          ...
+
+      # Good
+      def get_items(self) -> list[Item]:
+          ...
+      def get_items_mapping(self) -> dict[str, Item]:
+          ...
+      ```
+
+    - Instead, consider using a single container type and providing a separate method to convert to another type if needed.
+ - However, this should not be considered a ban on returning different container types if the method is specifically designed to do so or if it is clear from the method name and documentation that this is the case. + +## Control Flow and Readability + +- Prefer guard clauses over deeply nested if statements. + - For example: + + ```python + # Avoid this + def process_item(item: Item): + if item is not None: + if item.is_valid(): + # process item + ... + # Do this instead + def process_item(item: Item): + if item is None or not item.is_valid(): + return + # process item + ... + ``` + +- Prefer meaningfully named composite `bool` conditionals over complex multi-line `if` statements. + - For example: + + ```python + # Avoid this + def is_valid_item(item: Item) -> bool: + if ((item.has_name() and item.has_value()) or + (item.has_description() and item.description_valid())): + if item.is_active(): + return True + return False + + # Do this instead + def is_valid_item(item: Item) -> bool: + has_name_and_value = item.has_name() and item.has_value() + has_description_and_valid = item.has_description() and item.description_valid() + + return (has_name_and_value or has_description_and_valid) and item.is_active() + ``` + +## Data Structures, Models, and Constants + +- Classes should be preferred over dictionaries or other anonymous data structures (e.g., raw 3-tuples). + - Classes should be used to represent complex data structures or objects, especially when they have behavior associated with them. + - `BaseModel` derived classes from [pydantic](https://docs.pydantic.dev/) should be used to represent data structures or objects when possible. + - This provides validation and conversion of data types, which is especially important when dealing with data from external sources such as APIs. + - However, when robust validation is not needed, or when performance is a (verifiable) concern, simple classes can be used instead. +- Use properties to provide read-only access to class attributes when appropriate. + - This implies you should avoid returning or exposing mutable members of a class, instead preferring to return a copy. + - For example, instead of: + + ```python + class Item: + def __init__(self, data: dict): + self.data = data + + def get_data(self) -> dict: + return self.data + ``` + + Prefer: + + ```python + class Item: + def __init__(self, data: dict): + self._data = data + + def get_data(self) -> dict: + return self._data.copy() + ``` + +- Magic strings or magic numbers should be considered evil. + - Magic strings or numbers are values that are hard-coded into the code and have no clear meaning. + - `StrEnum`s should be used to represent strings with a specific set of valid values, and regular constants can be used for isolated (unconnected) values. + - `Enum`s should be used to represent numbers with a specific set of valid values, and regular constants can be used for isolated (unconnected) values. + - If many constants relate to each other, they should be grouped into a class. + + ```python + # Avoid this + MAX_RETRIES = 5 + TIMEOUT = 30 + JITTER = 0.1 + ... + + # Good + class APIConfig: + MAX_RETRIES = 5 + TIMEOUT = 30 + JITTER = 0.1 + ... + ``` + +### "KNOWN" Constants + +- For consumer convenience, parameters which have a fixed set of known values should be defined as constants in an appropriate `consts.py` file. These constants should be named with the `KNOWN_` prefix and should be defined as `StrEnum`s or `Enum`s as appropriate.
+- However, these values should **always be considered optional**. Consumers of the SDK should be able to use any valid value for the parameter as long as its type is correct. It would be ideal, but not required, for classes or functions which require these parameters to validate them against the live API at runtime. + - ***Rationale***: This prevents the SDK from needing to be updated every time a new value is added to an API, and allows consumers to use any valid value without needing to wait for an SDK update. + +## Imports and Module Export + +- Star imports are considered evil. +- Significant namespaces must explicitly export all public members via `__all__`. + - This includes classes, functions, and variables that are intended to be used by consumers of the module. + +## Pydantic BaseModel Usage + +- `BaseModel` derived classes should be used in a DataClass-like manner. + - They should not implement any methods or properties which require any side effects or have any state. + - ***Rationale***: [pydantic](https://docs.pydantic.dev)'s `BaseModel` provides robust validation and conversion of data types. Mutating the state of a model bypasses this validation and conversion. + - However, methods which perform validation or conversion of the data are allowed, but should be narrow in scope. + - **Exception**: Values should never be coerced to `None` for optional fields when passed a non-`None` value. + - ***Rationale***: If an invalid value is passed to a field, it should be raised as an error. This is especially important when implementing a "Template" class (in which all fields are optional): if a child class overrides a field to be non-optional but the parent class is coercing it to `None`, this can cause bugs or run-time errors. + - As a general rule, if a function in a `BaseModel` derived class extends beyond `isinstance` or value checking, it probably should be moved to a different class or utility function. + - `BaseModel` derived classes used as API responses or members of an API response should be frozen. Additionally, classes meant to represent data set by a client, server, or worker should also be frozen. + - **Important**: This is not a blanket requirement for all `BaseModel` derived classes. Consider carefully how likely it is that a consumer would want to modify the data in the model, and whether doing so would have unintended consequences that are difficult for consumers to understand. + - ***Rationale***: Classes of this kind being frozen reflects the fact that they are not meant to be modified after creation. Workers receiving jobs from the server (and specified by the client) should never need to modify that job data. Raising errors when a worker attempts to modify job data prevents accidental modification and prevents a category of bugs. + - This is done by setting the field `model_config` to an instance of a `ConfigDict`. + - See `get_default_frozen_model_config_dict()` in the top-level namespace (`horde_sdk.__init__.py`) for the default frozen model config. + - This function should be used whenever possible to set the `model_config` field. + - Using this function ensures that the model config is consistent across the codebase. + - Further, the CI/Testing relies on using this function to ensure that the models are frozen in tests. + - If you set your own `model_config`, you must ensure that it is consistent with the behavior of `get_default_frozen_model_config_dict()`.
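+- For example, here is a minimal sketch of a frozen response-style model. It is illustrative only: `ExampleKudosResponse` and its fields are hypothetical, but `get_default_frozen_model_config_dict()` is the real helper described above.
+
+  ```python
+  from pydantic import BaseModel
+
+  from horde_sdk import get_default_frozen_model_config_dict
+
+
+  class ExampleKudosResponse(BaseModel):
+      """A hypothetical frozen response model (not an actual SDK class)."""
+
+      # Use the shared helper so the frozen config stays consistent with the rest of the SDK.
+      model_config = get_default_frozen_model_config_dict()
+
+      kudos: float
+      message: str | None = None
+  ```
+
+  Because the model config is frozen, assigning to a field after construction (e.g., `response.kudos = 0`) raises a pydantic `ValidationError` instead of silently mutating the instance.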
+ +## API Model Verification + +These rules are specific to (remote) API models (for example, any class in an `apimodels` namespace). + +1. **All `HordeAPIObject` and `HordeAPIData` sub-classes must be imported** in their appropriate apimodels `__init__.py` files so that they match the live API surface. + - For example, all AI Horde API models must be imported into `horde_sdk.ai_horde_api/apimodels/__init__.py`. + - ***Rationale***: This ensures that the SDK's API surface matches the live API surface and that all models are properly documented and tested. The testing relies on these imports to function correctly. Further, this ensures that all models are appropriately exposed for consumers of the SDK. +2. **Any model in the API docs** must be defined in the SDK. Unreferenced or missing models will cause a test failure. +3. **All endpoints in the API docs** must be handled or marked as ignored (e.g., admin-only or deprecated). Unknown or unaddressed endpoints raise errors during testing. +4. **All models must have docstrings** conforming to the style documented above. If they do not, these tests will fail and suggest required changes. +5. **All models (including requests and responses) must be instantiable from example JSON** found in the test data directories. If instantiation fails, the test will provide details on what's wrong. +6. **All request/response types must be hashable** if they aren't explicitly marked as unhashable (`@Unhashable`), which ensures they can be properly used in collections. +7. **All request/response types must be equatable** if they aren't explicitly marked as unequatable (`@Unequatable`), which ensures they can be properly compared in tests. +8. **Example payloads, response data, and production responses** must be valid according to their corresponding model validation rules, ensuring that the models accurately represent real-world data. diff --git a/docs/definitions.md b/docs/definitions.md new file mode 100644 index 00000000..b3a986af --- /dev/null +++ b/docs/definitions.md @@ -0,0 +1,29 @@ +# horde_sdk specific definitions + +See also the [naming section of the style guide](concepts/style_guide.md#naming-conventions) for additional explanations of certain terms. + +## API + +### Clients + +#### ManualClient + +A manual client interacts with an API but does not provide any context management. Clients of this kind do not automatically cancel in-process jobs, for example. + +#### ClientSession + +Client session classes provide context management for API operations. They automatically cancel or follow-up on any in-process operations when exiting the context. Client sessions generally inherit from ManualClient. + +## Jobs/Generations/Inference + +### Generation + +A generation is a single instance of inference or in the case of alchemy, a single instance of post-processing. Generations can result in one or more logical outputs and may or may not be batched. For example, a text generation may result in a single string output, while an image generation may result in one or multiple images being returned. Generations may be a single step in a multi-step process. Not to be confused with a job. + +### Job + +A job is a collection of one or more generations that are executed together. Jobs can be thought of as a batch of generations that are processed in parallel or sequentially, depending on the API and client implementation. A job may consist of multiple generations that share common parameters or context. 
Jobs also contain metadata from the dispatch source, such as the job ID, generation IDs, whether the job is subject to NSFW checks, etc. Not to be confused with a generation. + +### "batch", "batch_size", "n_iter", "number_expected_results", etc + +These terms refer to the number of results that are expected to be returned from a generation during a *single run of inference*. In the case of image generation, the user may request multiple images which share all parameters and are generated at the same time. If the user requests 4 images using 25 steps, only 25 steps of inference run. Contrast this with a job that may consist of multiple generations, each with their own parameters and context, which run in parallel or sequentially each with their own inference steps. diff --git a/docs/developers.md b/docs/developers.md new file mode 100644 index 00000000..cefde983 --- /dev/null +++ b/docs/developers.md @@ -0,0 +1,21 @@ +# Developer's Guide + +This guide is targeted to those who wish to contribute to the development of the Horde SDK. The following is a list of documents and resources you should review before contributing: + +## General SDK Concepts + +- [Contributing Guide](https://github.com/Haidra-Org/horde-sdk/blob/main/CONTRIBUTING.md) + - Explains the basic expectations for contributing to the Horde SDK as well as basic environment setup, code quality tools, and testing. +- [Style Guide](concepts/style_guide.md) + - Provides guidelines for code style and system design expectations. +- [Package Structure](concepts/package_structure.md) + - Explains the file and package structure of the Horde SDK and touches on the relationships between them. +- [Definitions](definitions.md) + - Provides definitions for key terms used throughout the Horde ecosystem, including users, requests, jobs, and workers. + +## AI Horde Concepts + +- [AI Horde Request/Job Lifecycle](haidra-assets/docs/workers.md) + - Describes the lifecycle of a request and job within the AI Horde ecosystem, including how kudos are awarded. +- [Kudos System](haidra-assets/docs/kudos.md) + - Explains the kudos system, its purpose, and how it facilitates fair resource exchange within the AI Horde. diff --git a/docs/examples.md b/docs/examples.md index c1bec64e..fa5a811d 100644 --- a/docs/examples.md +++ b/docs/examples.md @@ -1,3 +1,3 @@ -# Example Clients +# Example Code -See `examples/` (https://github.com/Haidra-Org/horde-sdk/tree/main/examples) for a complete list. These examples are all made in mind with your current working directory as `horde_sdk` (e.g., `cd horde_sdk`). +See `examples/` () for a complete list. These examples are all made in mind with your current working directory as `horde_sdk` (e.g., `cd horde_sdk`). diff --git a/docs/getting_started.md b/docs/getting_started.md index b999ac45..a5bdf614 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -23,7 +23,7 @@ pip install horde_sdk 2. Find the `*Request` object type appropriate to what you want to do. (see also: [naming](getting_started.md#naming)) - These objects types are always found in the `apimodels` namespace of the `*_api` sub package. - - e.g., [ImageGenerateAsyncRequest][horde_sdk.ai_horde_api.apimodels.generate._async.ImageGenerateAsyncRequest] + - e.g., [ImageGenerateAsyncRequest][horde_sdk.ai_horde_api.apimodels.generate.async_.ImageGenerateAsyncRequest] - **Note** that there is always one or more response types mapped to a request. 
You can get the default success response `type` like so: @@ -36,7 +36,7 @@ pip install horde_sdk >>> image_gen_request.get_success_response_type() ``` - Accordingly, the [ImageGenerateAsyncResponse][horde_sdk.ai_horde_api.apimodels.generate._async.ImageGenerateAsyncResponse] type is expected to be the return type from the API. + Accordingly, the [ImageGenerateAsyncResponse][horde_sdk.ai_horde_api.apimodels.generate.async_.ImageGenerateAsyncResponse] type is expected to be the return type from the API.
@@ -54,7 +54,7 @@ image_generate_async_request = ImageGenerateAsyncRequest( params=ImageGenerationInputPayload( width=512, height=768, - sampler_name=KNOWN_SAMPLERS.k_euler_a, + sampler_name=KNOWN_IMAGE_SAMPLERS.k_euler_a, clip_skip=1, n=2, ), @@ -66,7 +66,7 @@ image_generate_async_request = ImageGenerateAsyncRequest( Simple Client: ``` python simple_client = AIHordeAPISimpleClient() - status_response, job_id = simple_client.image_generate_request( + status_response, gen_id = simple_client.image_generate_request( ImageGenerateAsyncRequest( apikey=ANON_API_KEY, prompt="A cat in a hat", @@ -100,7 +100,7 @@ image_generate_async_request = ImageGenerateAsyncRequest( ### API Expectations #### Important note about manual clients -A few endpoints, such as `/v2/generate/async` ([ImageGenerateAsyncRequest][horde_sdk.ai_horde_api.apimodels.generate._async.ImageGenerateAsyncRequest]), will have their operations live on the API server until they are retrieved or cancelled (in this case, with either a [ImageGenerateStatusRequest][horde_sdk.ai_horde_api.apimodels.generate._status.ImageGenerateStatusRequest] or [DeleteImageGenerateRequest][horde_sdk.ai_horde_api.apimodels.generate._status.DeleteImageGenerateRequest]). If you use a manual client, you are assuming responsibility for making a best-effort for cleaning up errant requests, especially if your implementation crashes. If you use a simple client, you do not have to worry about this, as [context handlers][horde_sdk.generic_api.generic_clients.GenericHordeAPISession] take care of this. +A few endpoints, such as `/v2/generate/async` ([ImageGenerateAsyncRequest][horde_sdk.ai_horde_api.apimodels.generate.async_.ImageGenerateAsyncRequest]), will have their operations live on the API server until they are retrieved or cancelled (in this case, with either a [ImageGenerateStatusRequest][horde_sdk.ai_horde_api.apimodels.generate.status.ImageGenerateStatusRequest] or [DeleteImageGenerateRequest][horde_sdk.ai_horde_api.apimodels.generate.status.DeleteImageGenerateRequest]). If you use a manual client, you are assuming responsibility for making a best-effort for cleaning up errant requests, especially if your implementation crashes. If you use a simple client, you do not have to worry about this, as [context handlers][horde_sdk.generic_api.generic_clients.GenericHordeAPISession] take care of this. 
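+As a rough, illustrative sketch of that responsibility (the client object and its `submit_request(...)` method below are hypothetical placeholders for however your manual client sends requests, not the SDK's actual method names; `DeleteImageGenerateRequest` is the real request type mentioned above):
+
+```python
+# Best-effort cleanup with a manual client: if polling stops early or the
+# process crashes, try to delete the job so it does not linger on the API server.
+response = manual_client.submit_request(image_generate_async_request)  # hypothetical call
+job_id = response.id_  # assuming the response exposes the job ID as `id_`
+try:
+    ...  # poll for and download the finished generation
+finally:
+    manual_client.submit_request(DeleteImageGenerateRequest(id_=job_id))  # hypothetical call
+```
+
+A session-based client performs this kind of follow-up automatically when its context exits, which is why the simple client shown earlier needs no explicit cleanup.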
### Typing diff --git a/docs/haidra-assets b/docs/haidra-assets new file mode 160000 index 00000000..31160295 --- /dev/null +++ b/docs/haidra-assets @@ -0,0 +1 @@ +Subproject commit 31160295b67422dcb33573846a8af509d90039e1 diff --git a/docs/horde_sdk/_telemetry/.pages b/docs/horde_sdk/_telemetry/.pages new file mode 100644 index 00000000..8610a190 --- /dev/null +++ b/docs/horde_sdk/_telemetry/.pages @@ -0,0 +1 @@ +title: _telemetry diff --git a/docs/horde_sdk/_telemetry/metrics.md b/docs/horde_sdk/_telemetry/metrics.md new file mode 100644 index 00000000..a800886b --- /dev/null +++ b/docs/horde_sdk/_telemetry/metrics.md @@ -0,0 +1,2 @@ +# metrics +::: horde_sdk._telemetry.metrics diff --git a/docs/horde_sdk/ai_horde_api/apimodels/_documents.md b/docs/horde_sdk/ai_horde_api/apimodels/_documents.md deleted file mode 100644 index 5ccea2ff..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/_documents.md +++ /dev/null @@ -1,2 +0,0 @@ -# _documents -::: horde_sdk.ai_horde_api.apimodels._documents diff --git a/docs/horde_sdk/ai_horde_api/apimodels/_find_user.md b/docs/horde_sdk/ai_horde_api/apimodels/_find_user.md deleted file mode 100644 index 0075fdc0..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/_find_user.md +++ /dev/null @@ -1,2 +0,0 @@ -# _find_user -::: horde_sdk.ai_horde_api.apimodels._find_user diff --git a/docs/horde_sdk/ai_horde_api/apimodels/_kudos.md b/docs/horde_sdk/ai_horde_api/apimodels/_kudos.md deleted file mode 100644 index 57ee0e86..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/_kudos.md +++ /dev/null @@ -1,2 +0,0 @@ -# _kudos -::: horde_sdk.ai_horde_api.apimodels._kudos diff --git a/docs/horde_sdk/ai_horde_api/apimodels/_stats.md b/docs/horde_sdk/ai_horde_api/apimodels/_stats.md deleted file mode 100644 index af66e459..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/_stats.md +++ /dev/null @@ -1,2 +0,0 @@ -# _stats -::: horde_sdk.ai_horde_api.apimodels._stats diff --git a/docs/horde_sdk/ai_horde_api/apimodels/_status.md b/docs/horde_sdk/ai_horde_api/apimodels/_status.md deleted file mode 100644 index a4f711bc..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/_status.md +++ /dev/null @@ -1,2 +0,0 @@ -# _status -::: horde_sdk.ai_horde_api.apimodels._status diff --git a/docs/horde_sdk/ai_horde_api/apimodels/_styles.md b/docs/horde_sdk/ai_horde_api/apimodels/_styles.md deleted file mode 100644 index f879c89c..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/_styles.md +++ /dev/null @@ -1,2 +0,0 @@ -# _styles -::: horde_sdk.ai_horde_api.apimodels._styles diff --git a/docs/horde_sdk/ai_horde_api/apimodels/_users.md b/docs/horde_sdk/ai_horde_api/apimodels/_users.md deleted file mode 100644 index 8d4f8e42..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/_users.md +++ /dev/null @@ -1,2 +0,0 @@ -# _users -::: horde_sdk.ai_horde_api.apimodels._users diff --git a/docs/horde_sdk/ai_horde_api/apimodels/alchemy/_async.md b/docs/horde_sdk/ai_horde_api/apimodels/alchemy/_async.md deleted file mode 100644 index 4bc35317..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/alchemy/_async.md +++ /dev/null @@ -1,2 +0,0 @@ -# _async -::: horde_sdk.ai_horde_api.apimodels.alchemy._async diff --git a/docs/horde_sdk/ai_horde_api/apimodels/alchemy/_pop.md b/docs/horde_sdk/ai_horde_api/apimodels/alchemy/_pop.md deleted file mode 100644 index 22596b34..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/alchemy/_pop.md +++ /dev/null @@ -1,2 +0,0 @@ -# _pop -::: horde_sdk.ai_horde_api.apimodels.alchemy._pop diff --git 
a/docs/horde_sdk/ai_horde_api/apimodels/alchemy/_status.md b/docs/horde_sdk/ai_horde_api/apimodels/alchemy/_status.md deleted file mode 100644 index a3d10284..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/alchemy/_status.md +++ /dev/null @@ -1,2 +0,0 @@ -# _status -::: horde_sdk.ai_horde_api.apimodels.alchemy._status diff --git a/docs/horde_sdk/ai_horde_api/apimodels/alchemy/_submit.md b/docs/horde_sdk/ai_horde_api/apimodels/alchemy/_submit.md deleted file mode 100644 index b7e3175c..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/alchemy/_submit.md +++ /dev/null @@ -1,2 +0,0 @@ -# _submit -::: horde_sdk.ai_horde_api.apimodels.alchemy._submit diff --git a/docs/horde_sdk/ai_horde_api/apimodels/alchemy/async_.md b/docs/horde_sdk/ai_horde_api/apimodels/alchemy/async_.md new file mode 100644 index 00000000..6481bb99 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/alchemy/async_.md @@ -0,0 +1,2 @@ +# async_ +::: horde_sdk.ai_horde_api.apimodels.alchemy.async_ diff --git a/docs/horde_sdk/ai_horde_api/apimodels/alchemy/pop.md b/docs/horde_sdk/ai_horde_api/apimodels/alchemy/pop.md new file mode 100644 index 00000000..783d846c --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/alchemy/pop.md @@ -0,0 +1,2 @@ +# pop +::: horde_sdk.ai_horde_api.apimodels.alchemy.pop diff --git a/docs/horde_sdk/ai_horde_api/apimodels/alchemy/status.md b/docs/horde_sdk/ai_horde_api/apimodels/alchemy/status.md new file mode 100644 index 00000000..7959c5ed --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/alchemy/status.md @@ -0,0 +1,2 @@ +# status +::: horde_sdk.ai_horde_api.apimodels.alchemy.status diff --git a/docs/horde_sdk/ai_horde_api/apimodels/alchemy/submit.md b/docs/horde_sdk/ai_horde_api/apimodels/alchemy/submit.md new file mode 100644 index 00000000..47ddf137 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/alchemy/submit.md @@ -0,0 +1,2 @@ +# submit +::: horde_sdk.ai_horde_api.apimodels.alchemy.submit diff --git a/docs/horde_sdk/ai_horde_api/apimodels/collections.md b/docs/horde_sdk/ai_horde_api/apimodels/collections.md new file mode 100644 index 00000000..942c31f8 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/collections.md @@ -0,0 +1,2 @@ +# collections +::: horde_sdk.ai_horde_api.apimodels.collections diff --git a/docs/horde_sdk/ai_horde_api/apimodels/documents.md b/docs/horde_sdk/ai_horde_api/apimodels/documents.md new file mode 100644 index 00000000..a45fe3f2 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/documents.md @@ -0,0 +1,2 @@ +# documents +::: horde_sdk.ai_horde_api.apimodels.documents diff --git a/docs/horde_sdk/ai_horde_api/apimodels/filters.md b/docs/horde_sdk/ai_horde_api/apimodels/filters.md new file mode 100644 index 00000000..bdd21e4a --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/filters.md @@ -0,0 +1,2 @@ +# filters +::: horde_sdk.ai_horde_api.apimodels.filters diff --git a/docs/horde_sdk/ai_horde_api/apimodels/find_user.md b/docs/horde_sdk/ai_horde_api/apimodels/find_user.md new file mode 100644 index 00000000..8bf88f32 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/find_user.md @@ -0,0 +1,2 @@ +# find_user +::: horde_sdk.ai_horde_api.apimodels.find_user diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/_async.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/_async.md deleted file mode 100644 index 23cf93f1..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/generate/_async.md +++ /dev/null @@ -1,2 +0,0 @@ -# _async -::: horde_sdk.ai_horde_api.apimodels.generate._async diff --git 
a/docs/horde_sdk/ai_horde_api/apimodels/generate/_check.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/_check.md deleted file mode 100644 index 705d0cd1..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/generate/_check.md +++ /dev/null @@ -1,2 +0,0 @@ -# _check -::: horde_sdk.ai_horde_api.apimodels.generate._check diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/_pop.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/_pop.md deleted file mode 100644 index dd462b54..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/generate/_pop.md +++ /dev/null @@ -1,2 +0,0 @@ -# _pop -::: horde_sdk.ai_horde_api.apimodels.generate._pop diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/_progress.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/_progress.md deleted file mode 100644 index 226956ef..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/generate/_progress.md +++ /dev/null @@ -1,2 +0,0 @@ -# _progress -::: horde_sdk.ai_horde_api.apimodels.generate._progress diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/_status.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/_status.md deleted file mode 100644 index 71b819ca..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/generate/_status.md +++ /dev/null @@ -1,2 +0,0 @@ -# _status -::: horde_sdk.ai_horde_api.apimodels.generate._status diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/_submit.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/_submit.md deleted file mode 100644 index 6226cd33..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/generate/_submit.md +++ /dev/null @@ -1,2 +0,0 @@ -# _submit -::: horde_sdk.ai_horde_api.apimodels.generate._submit diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/async_.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/async_.md new file mode 100644 index 00000000..9986636e --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/generate/async_.md @@ -0,0 +1,2 @@ +# async_ +::: horde_sdk.ai_horde_api.apimodels.generate.async_ diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/check.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/check.md new file mode 100644 index 00000000..423410ca --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/generate/check.md @@ -0,0 +1,2 @@ +# check +::: horde_sdk.ai_horde_api.apimodels.generate.check diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/pop.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/pop.md new file mode 100644 index 00000000..d6b2a23c --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/generate/pop.md @@ -0,0 +1,2 @@ +# pop +::: horde_sdk.ai_horde_api.apimodels.generate.pop diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/progress.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/progress.md new file mode 100644 index 00000000..a4fcb855 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/generate/progress.md @@ -0,0 +1,2 @@ +# progress +::: horde_sdk.ai_horde_api.apimodels.generate.progress diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/rate.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/rate.md new file mode 100644 index 00000000..13d8261d --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/generate/rate.md @@ -0,0 +1,2 @@ +# rate +::: horde_sdk.ai_horde_api.apimodels.generate.rate diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/status.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/status.md new file mode 100644 index 00000000..639e230a --- /dev/null +++ 
b/docs/horde_sdk/ai_horde_api/apimodels/generate/status.md @@ -0,0 +1,2 @@ +# status +::: horde_sdk.ai_horde_api.apimodels.generate.status diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/submit.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/submit.md new file mode 100644 index 00000000..39d36380 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/generate/submit.md @@ -0,0 +1,2 @@ +# submit +::: horde_sdk.ai_horde_api.apimodels.generate.submit diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/text/_async.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/text/_async.md deleted file mode 100644 index 3c48ebf6..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/generate/text/_async.md +++ /dev/null @@ -1,2 +0,0 @@ -# _async -::: horde_sdk.ai_horde_api.apimodels.generate.text._async diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/text/_pop.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/text/_pop.md deleted file mode 100644 index 4ec6d455..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/generate/text/_pop.md +++ /dev/null @@ -1,2 +0,0 @@ -# _pop -::: horde_sdk.ai_horde_api.apimodels.generate.text._pop diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/text/_status.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/text/_status.md deleted file mode 100644 index c307f715..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/generate/text/_status.md +++ /dev/null @@ -1,2 +0,0 @@ -# _status -::: horde_sdk.ai_horde_api.apimodels.generate.text._status diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/text/_submit.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/text/_submit.md deleted file mode 100644 index d2803ef4..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/generate/text/_submit.md +++ /dev/null @@ -1,2 +0,0 @@ -# _submit -::: horde_sdk.ai_horde_api.apimodels.generate.text._submit diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/text/async_.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/text/async_.md new file mode 100644 index 00000000..b387ffbc --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/generate/text/async_.md @@ -0,0 +1,2 @@ +# async_ +::: horde_sdk.ai_horde_api.apimodels.generate.text.async_ diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/text/pop.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/text/pop.md new file mode 100644 index 00000000..d9041f12 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/generate/text/pop.md @@ -0,0 +1,2 @@ +# pop +::: horde_sdk.ai_horde_api.apimodels.generate.text.pop diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/text/status.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/text/status.md new file mode 100644 index 00000000..49d885ef --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/generate/text/status.md @@ -0,0 +1,2 @@ +# status +::: horde_sdk.ai_horde_api.apimodels.generate.text.status diff --git a/docs/horde_sdk/ai_horde_api/apimodels/generate/text/submit.md b/docs/horde_sdk/ai_horde_api/apimodels/generate/text/submit.md new file mode 100644 index 00000000..8235d3e6 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/generate/text/submit.md @@ -0,0 +1,2 @@ +# submit +::: horde_sdk.ai_horde_api.apimodels.generate.text.submit diff --git a/docs/horde_sdk/ai_horde_api/apimodels/kudos.md b/docs/horde_sdk/ai_horde_api/apimodels/kudos.md new file mode 100644 index 00000000..f104fca8 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/kudos.md @@ -0,0 +1,2 @@ +# kudos +::: 
horde_sdk.ai_horde_api.apimodels.kudos diff --git a/docs/horde_sdk/ai_horde_api/apimodels/operations.md b/docs/horde_sdk/ai_horde_api/apimodels/operations.md new file mode 100644 index 00000000..32abd3d2 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/operations.md @@ -0,0 +1,2 @@ +# operations +::: horde_sdk.ai_horde_api.apimodels.operations diff --git a/docs/horde_sdk/ai_horde_api/apimodels/sharedkeys.md b/docs/horde_sdk/ai_horde_api/apimodels/sharedkeys.md new file mode 100644 index 00000000..ebc8fde9 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/sharedkeys.md @@ -0,0 +1,2 @@ +# sharedkeys +::: horde_sdk.ai_horde_api.apimodels.sharedkeys diff --git a/docs/horde_sdk/ai_horde_api/apimodels/stats.md b/docs/horde_sdk/ai_horde_api/apimodels/stats.md new file mode 100644 index 00000000..3c204fce --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/stats.md @@ -0,0 +1,2 @@ +# stats +::: horde_sdk.ai_horde_api.apimodels.stats diff --git a/docs/horde_sdk/ai_horde_api/apimodels/status.md b/docs/horde_sdk/ai_horde_api/apimodels/status.md new file mode 100644 index 00000000..f714d086 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/status.md @@ -0,0 +1,2 @@ +# status +::: horde_sdk.ai_horde_api.apimodels.status diff --git a/docs/horde_sdk/ai_horde_api/apimodels/styles.md b/docs/horde_sdk/ai_horde_api/apimodels/styles.md new file mode 100644 index 00000000..967a3193 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/styles.md @@ -0,0 +1,2 @@ +# styles +::: horde_sdk.ai_horde_api.apimodels.styles diff --git a/docs/horde_sdk/ai_horde_api/apimodels/teams.md b/docs/horde_sdk/ai_horde_api/apimodels/teams.md new file mode 100644 index 00000000..5ae09461 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/teams.md @@ -0,0 +1,2 @@ +# teams +::: horde_sdk.ai_horde_api.apimodels.teams diff --git a/docs/horde_sdk/ai_horde_api/apimodels/users.md b/docs/horde_sdk/ai_horde_api/apimodels/users.md new file mode 100644 index 00000000..ec9c1a24 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/users.md @@ -0,0 +1,2 @@ +# users +::: horde_sdk.ai_horde_api.apimodels.users diff --git a/docs/horde_sdk/ai_horde_api/apimodels/workers/_workers.md b/docs/horde_sdk/ai_horde_api/apimodels/workers/_workers.md deleted file mode 100644 index dea5d962..00000000 --- a/docs/horde_sdk/ai_horde_api/apimodels/workers/_workers.md +++ /dev/null @@ -1,2 +0,0 @@ -# _workers -::: horde_sdk.ai_horde_api.apimodels.workers._workers diff --git a/docs/horde_sdk/ai_horde_api/apimodels/workers/messages.md b/docs/horde_sdk/ai_horde_api/apimodels/workers/messages.md new file mode 100644 index 00000000..e08abd05 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/workers/messages.md @@ -0,0 +1,2 @@ +# messages +::: horde_sdk.ai_horde_api.apimodels.workers.messages diff --git a/docs/horde_sdk/ai_horde_api/apimodels/workers/workers.md b/docs/horde_sdk/ai_horde_api/apimodels/workers/workers.md new file mode 100644 index 00000000..e644b313 --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/apimodels/workers/workers.md @@ -0,0 +1,2 @@ +# workers +::: horde_sdk.ai_horde_api.apimodels.workers.workers diff --git a/docs/horde_sdk/ai_horde_api/model_records.md b/docs/horde_sdk/ai_horde_api/model_records.md new file mode 100644 index 00000000..a725425a --- /dev/null +++ b/docs/horde_sdk/ai_horde_api/model_records.md @@ -0,0 +1,2 @@ +# model_records +::: horde_sdk.ai_horde_api.model_records diff --git a/docs/horde_sdk/ai_horde_worker/.pages b/docs/horde_sdk/ai_horde_worker/.pages deleted file mode 
100644 index bd629ca3..00000000 --- a/docs/horde_sdk/ai_horde_worker/.pages +++ /dev/null @@ -1 +0,0 @@ -title: ai_horde_worker diff --git a/docs/horde_sdk/ai_horde_worker/bridge_data.md b/docs/horde_sdk/ai_horde_worker/bridge_data.md deleted file mode 100644 index b5234bd9..00000000 --- a/docs/horde_sdk/ai_horde_worker/bridge_data.md +++ /dev/null @@ -1,2 +0,0 @@ -# bridge_data -::: horde_sdk.ai_horde_worker.bridge_data diff --git a/docs/horde_sdk/ai_horde_worker/locale_info/bridge_data_fields.md b/docs/horde_sdk/ai_horde_worker/locale_info/bridge_data_fields.md deleted file mode 100644 index 77dae166..00000000 --- a/docs/horde_sdk/ai_horde_worker/locale_info/bridge_data_fields.md +++ /dev/null @@ -1,2 +0,0 @@ -# bridge_data_fields -::: horde_sdk.ai_horde_worker.locale_info.bridge_data_fields diff --git a/docs/horde_sdk/ai_horde_worker/model_meta.md b/docs/horde_sdk/ai_horde_worker/model_meta.md deleted file mode 100644 index 882c0392..00000000 --- a/docs/horde_sdk/ai_horde_worker/model_meta.md +++ /dev/null @@ -1,2 +0,0 @@ -# model_meta -::: horde_sdk.ai_horde_worker.model_meta diff --git a/docs/horde_sdk/ai_horde_worker/worker_client.md b/docs/horde_sdk/ai_horde_worker/worker_client.md deleted file mode 100644 index beed79b0..00000000 --- a/docs/horde_sdk/ai_horde_worker/worker_client.md +++ /dev/null @@ -1,2 +0,0 @@ -# worker_client -::: horde_sdk.ai_horde_worker.worker_client diff --git a/docs/horde_sdk/backend_parsing/.pages b/docs/horde_sdk/backend_parsing/.pages new file mode 100644 index 00000000..8396e3fb --- /dev/null +++ b/docs/horde_sdk/backend_parsing/.pages @@ -0,0 +1 @@ +title: backend_parsing diff --git a/docs/horde_sdk/backend_parsing/alchemy/.pages b/docs/horde_sdk/backend_parsing/alchemy/.pages new file mode 100644 index 00000000..ed76939b --- /dev/null +++ b/docs/horde_sdk/backend_parsing/alchemy/.pages @@ -0,0 +1 @@ +title: alchemy diff --git a/docs/horde_sdk/backend_parsing/consts.md b/docs/horde_sdk/backend_parsing/consts.md new file mode 100644 index 00000000..412373f1 --- /dev/null +++ b/docs/horde_sdk/backend_parsing/consts.md @@ -0,0 +1,2 @@ +# consts +::: horde_sdk.backend_parsing.consts diff --git a/docs/horde_sdk/backend_parsing/image/.pages b/docs/horde_sdk/backend_parsing/image/.pages new file mode 100644 index 00000000..1382a2c9 --- /dev/null +++ b/docs/horde_sdk/backend_parsing/image/.pages @@ -0,0 +1 @@ +title: image diff --git a/docs/horde_sdk/backend_parsing/image/comfyui/.pages b/docs/horde_sdk/backend_parsing/image/comfyui/.pages new file mode 100644 index 00000000..a01d5626 --- /dev/null +++ b/docs/horde_sdk/backend_parsing/image/comfyui/.pages @@ -0,0 +1 @@ +title: comfyui diff --git a/docs/horde_sdk/backend_parsing/image/comfyui/hordelib.md b/docs/horde_sdk/backend_parsing/image/comfyui/hordelib.md new file mode 100644 index 00000000..f85c6a5d --- /dev/null +++ b/docs/horde_sdk/backend_parsing/image/comfyui/hordelib.md @@ -0,0 +1,2 @@ +# hordelib +::: horde_sdk.backend_parsing.image.comfyui.hordelib diff --git a/docs/horde_sdk/backend_parsing/image/consts.md b/docs/horde_sdk/backend_parsing/image/consts.md new file mode 100644 index 00000000..01e1e3b2 --- /dev/null +++ b/docs/horde_sdk/backend_parsing/image/consts.md @@ -0,0 +1,2 @@ +# consts +::: horde_sdk.backend_parsing.image.consts diff --git a/docs/horde_sdk/backend_parsing/multimodal/.pages b/docs/horde_sdk/backend_parsing/multimodal/.pages new file mode 100644 index 00000000..37b24126 --- /dev/null +++ b/docs/horde_sdk/backend_parsing/multimodal/.pages @@ -0,0 +1 @@ +title: multimodal diff 
--git a/docs/horde_sdk/backend_parsing/object_models.md b/docs/horde_sdk/backend_parsing/object_models.md new file mode 100644 index 00000000..186ccc98 --- /dev/null +++ b/docs/horde_sdk/backend_parsing/object_models.md @@ -0,0 +1,2 @@ +# object_models +::: horde_sdk.backend_parsing.object_models diff --git a/docs/horde_sdk/backend_parsing/text/.pages b/docs/horde_sdk/backend_parsing/text/.pages new file mode 100644 index 00000000..05126b0c --- /dev/null +++ b/docs/horde_sdk/backend_parsing/text/.pages @@ -0,0 +1 @@ +title: text diff --git a/docs/horde_sdk/backend_parsing/video/.pages b/docs/horde_sdk/backend_parsing/video/.pages new file mode 100644 index 00000000..af760f63 --- /dev/null +++ b/docs/horde_sdk/backend_parsing/video/.pages @@ -0,0 +1 @@ +title: video diff --git a/docs/horde_sdk/deploy_config/.pages b/docs/horde_sdk/deploy_config/.pages new file mode 100644 index 00000000..383a0d80 --- /dev/null +++ b/docs/horde_sdk/deploy_config/.pages @@ -0,0 +1 @@ +title: deploy_config diff --git a/docs/horde_sdk/deploy_config/workers/.pages b/docs/horde_sdk/deploy_config/workers/.pages new file mode 100644 index 00000000..54554601 --- /dev/null +++ b/docs/horde_sdk/deploy_config/workers/.pages @@ -0,0 +1 @@ +title: workers diff --git a/docs/horde_sdk/generation_parameters/.pages b/docs/horde_sdk/generation_parameters/.pages new file mode 100644 index 00000000..104b7474 --- /dev/null +++ b/docs/horde_sdk/generation_parameters/.pages @@ -0,0 +1 @@ +title: generation_parameters diff --git a/docs/horde_sdk/generation_parameters/alchemy/.pages b/docs/horde_sdk/generation_parameters/alchemy/.pages new file mode 100644 index 00000000..ed76939b --- /dev/null +++ b/docs/horde_sdk/generation_parameters/alchemy/.pages @@ -0,0 +1 @@ +title: alchemy diff --git a/docs/horde_sdk/generation_parameters/alchemy/consts.md b/docs/horde_sdk/generation_parameters/alchemy/consts.md new file mode 100644 index 00000000..69be139e --- /dev/null +++ b/docs/horde_sdk/generation_parameters/alchemy/consts.md @@ -0,0 +1,2 @@ +# consts +::: horde_sdk.generation_parameters.alchemy.consts diff --git a/docs/horde_sdk/generation_parameters/alchemy/object_models.md b/docs/horde_sdk/generation_parameters/alchemy/object_models.md new file mode 100644 index 00000000..a7015e26 --- /dev/null +++ b/docs/horde_sdk/generation_parameters/alchemy/object_models.md @@ -0,0 +1,2 @@ +# object_models +::: horde_sdk.generation_parameters.alchemy.object_models diff --git a/docs/horde_sdk/generation_parameters/generic/.pages b/docs/horde_sdk/generation_parameters/generic/.pages new file mode 100644 index 00000000..ac9137fc --- /dev/null +++ b/docs/horde_sdk/generation_parameters/generic/.pages @@ -0,0 +1 @@ +title: generic diff --git a/docs/horde_sdk/generation_parameters/generic/consts.md b/docs/horde_sdk/generation_parameters/generic/consts.md new file mode 100644 index 00000000..f7d35a31 --- /dev/null +++ b/docs/horde_sdk/generation_parameters/generic/consts.md @@ -0,0 +1,2 @@ +# consts +::: horde_sdk.generation_parameters.generic.consts diff --git a/docs/horde_sdk/generation_parameters/generic/object_models.md b/docs/horde_sdk/generation_parameters/generic/object_models.md new file mode 100644 index 00000000..a030b1aa --- /dev/null +++ b/docs/horde_sdk/generation_parameters/generic/object_models.md @@ -0,0 +1,2 @@ +# object_models +::: horde_sdk.generation_parameters.generic.object_models diff --git a/docs/horde_sdk/generation_parameters/image/.pages b/docs/horde_sdk/generation_parameters/image/.pages new file mode 100644 index 
00000000..1382a2c9 --- /dev/null +++ b/docs/horde_sdk/generation_parameters/image/.pages @@ -0,0 +1 @@ +title: image diff --git a/docs/horde_sdk/generation_parameters/image/consts.md b/docs/horde_sdk/generation_parameters/image/consts.md new file mode 100644 index 00000000..c4c57f5c --- /dev/null +++ b/docs/horde_sdk/generation_parameters/image/consts.md @@ -0,0 +1,2 @@ +# consts +::: horde_sdk.generation_parameters.image.consts diff --git a/docs/horde_sdk/generation_parameters/image/object_models.md b/docs/horde_sdk/generation_parameters/image/object_models.md new file mode 100644 index 00000000..d5fa871e --- /dev/null +++ b/docs/horde_sdk/generation_parameters/image/object_models.md @@ -0,0 +1,2 @@ +# object_models +::: horde_sdk.generation_parameters.image.object_models diff --git a/docs/horde_sdk/generation_parameters/multimodal/.pages b/docs/horde_sdk/generation_parameters/multimodal/.pages new file mode 100644 index 00000000..37b24126 --- /dev/null +++ b/docs/horde_sdk/generation_parameters/multimodal/.pages @@ -0,0 +1 @@ +title: multimodal diff --git a/docs/horde_sdk/generation_parameters/templates/.pages b/docs/horde_sdk/generation_parameters/templates/.pages new file mode 100644 index 00000000..d7ea1dfe --- /dev/null +++ b/docs/horde_sdk/generation_parameters/templates/.pages @@ -0,0 +1 @@ +title: templates diff --git a/docs/horde_sdk/generation_parameters/text/.pages b/docs/horde_sdk/generation_parameters/text/.pages new file mode 100644 index 00000000..05126b0c --- /dev/null +++ b/docs/horde_sdk/generation_parameters/text/.pages @@ -0,0 +1 @@ +title: text diff --git a/docs/horde_sdk/generation_parameters/text/consts.md b/docs/horde_sdk/generation_parameters/text/consts.md new file mode 100644 index 00000000..ad6740fd --- /dev/null +++ b/docs/horde_sdk/generation_parameters/text/consts.md @@ -0,0 +1,2 @@ +# consts +::: horde_sdk.generation_parameters.text.consts diff --git a/docs/horde_sdk/generation_parameters/text/object_models.md b/docs/horde_sdk/generation_parameters/text/object_models.md new file mode 100644 index 00000000..72670996 --- /dev/null +++ b/docs/horde_sdk/generation_parameters/text/object_models.md @@ -0,0 +1,2 @@ +# object_models +::: horde_sdk.generation_parameters.text.object_models diff --git a/docs/horde_sdk/generation_parameters/video/.pages b/docs/horde_sdk/generation_parameters/video/.pages new file mode 100644 index 00000000..af760f63 --- /dev/null +++ b/docs/horde_sdk/generation_parameters/video/.pages @@ -0,0 +1 @@ +title: video diff --git a/docs/horde_sdk/object_models.md b/docs/horde_sdk/object_models.md new file mode 100644 index 00000000..23a7276d --- /dev/null +++ b/docs/horde_sdk/object_models.md @@ -0,0 +1,2 @@ +# object_models +::: horde_sdk.object_models diff --git a/docs/horde_sdk/utils.md b/docs/horde_sdk/utils.md deleted file mode 100644 index d07844f4..00000000 --- a/docs/horde_sdk/utils.md +++ /dev/null @@ -1,2 +0,0 @@ -# utils -::: horde_sdk.utils diff --git a/docs/horde_sdk/utils/.pages b/docs/horde_sdk/utils/.pages new file mode 100644 index 00000000..2c9da378 --- /dev/null +++ b/docs/horde_sdk/utils/.pages @@ -0,0 +1 @@ +title: utils diff --git a/docs/horde_sdk/utils/image_utils.md b/docs/horde_sdk/utils/image_utils.md new file mode 100644 index 00000000..35c7b708 --- /dev/null +++ b/docs/horde_sdk/utils/image_utils.md @@ -0,0 +1,2 @@ +# image_utils +::: horde_sdk.utils.image_utils diff --git a/docs/horde_sdk/worker/.pages b/docs/horde_sdk/worker/.pages new file mode 100644 index 00000000..16530e21 --- /dev/null +++ 
b/docs/horde_sdk/worker/.pages @@ -0,0 +1 @@ +title: worker diff --git a/docs/horde_sdk/worker/consts.md b/docs/horde_sdk/worker/consts.md new file mode 100644 index 00000000..d5e714a8 --- /dev/null +++ b/docs/horde_sdk/worker/consts.md @@ -0,0 +1,2 @@ +# consts +::: horde_sdk.worker.consts diff --git a/docs/horde_sdk/worker/dispatch/.pages b/docs/horde_sdk/worker/dispatch/.pages new file mode 100644 index 00000000..43bfe9fb --- /dev/null +++ b/docs/horde_sdk/worker/dispatch/.pages @@ -0,0 +1 @@ +title: dispatch diff --git a/docs/horde_sdk/worker/dispatch/ai_horde/.pages b/docs/horde_sdk/worker/dispatch/ai_horde/.pages new file mode 100644 index 00000000..d0cdb5df --- /dev/null +++ b/docs/horde_sdk/worker/dispatch/ai_horde/.pages @@ -0,0 +1 @@ +title: ai_horde diff --git a/docs/horde_sdk/worker/dispatch/ai_horde/alchemy/.pages b/docs/horde_sdk/worker/dispatch/ai_horde/alchemy/.pages new file mode 100644 index 00000000..ed76939b --- /dev/null +++ b/docs/horde_sdk/worker/dispatch/ai_horde/alchemy/.pages @@ -0,0 +1 @@ +title: alchemy diff --git a/docs/horde_sdk/worker/dispatch/ai_horde/alchemy/convert.md b/docs/horde_sdk/worker/dispatch/ai_horde/alchemy/convert.md new file mode 100644 index 00000000..c576c45a --- /dev/null +++ b/docs/horde_sdk/worker/dispatch/ai_horde/alchemy/convert.md @@ -0,0 +1,2 @@ +# convert +::: horde_sdk.worker.dispatch.ai_horde.alchemy.convert diff --git a/docs/horde_sdk/worker/dispatch/ai_horde/bridge_data.md b/docs/horde_sdk/worker/dispatch/ai_horde/bridge_data.md new file mode 100644 index 00000000..32e736b6 --- /dev/null +++ b/docs/horde_sdk/worker/dispatch/ai_horde/bridge_data.md @@ -0,0 +1,2 @@ +# bridge_data +::: horde_sdk.worker.dispatch.ai_horde.bridge_data diff --git a/docs/horde_sdk/worker/dispatch/ai_horde/image/.pages b/docs/horde_sdk/worker/dispatch/ai_horde/image/.pages new file mode 100644 index 00000000..1382a2c9 --- /dev/null +++ b/docs/horde_sdk/worker/dispatch/ai_horde/image/.pages @@ -0,0 +1 @@ +title: image diff --git a/docs/horde_sdk/worker/dispatch/ai_horde/image/convert.md b/docs/horde_sdk/worker/dispatch/ai_horde/image/convert.md new file mode 100644 index 00000000..4a354da2 --- /dev/null +++ b/docs/horde_sdk/worker/dispatch/ai_horde/image/convert.md @@ -0,0 +1,2 @@ +# convert +::: horde_sdk.worker.dispatch.ai_horde.image.convert diff --git a/docs/horde_sdk/worker/dispatch/ai_horde/multimodal/.pages b/docs/horde_sdk/worker/dispatch/ai_horde/multimodal/.pages new file mode 100644 index 00000000..37b24126 --- /dev/null +++ b/docs/horde_sdk/worker/dispatch/ai_horde/multimodal/.pages @@ -0,0 +1 @@ +title: multimodal diff --git a/docs/horde_sdk/worker/dispatch/ai_horde/pop_strategy.md b/docs/horde_sdk/worker/dispatch/ai_horde/pop_strategy.md new file mode 100644 index 00000000..0ad0f5c6 --- /dev/null +++ b/docs/horde_sdk/worker/dispatch/ai_horde/pop_strategy.md @@ -0,0 +1,2 @@ +# pop_strategy +::: horde_sdk.worker.dispatch.ai_horde.pop_strategy diff --git a/docs/horde_sdk/worker/dispatch/ai_horde/text/.pages b/docs/horde_sdk/worker/dispatch/ai_horde/text/.pages new file mode 100644 index 00000000..05126b0c --- /dev/null +++ b/docs/horde_sdk/worker/dispatch/ai_horde/text/.pages @@ -0,0 +1 @@ +title: text diff --git a/docs/horde_sdk/worker/dispatch/ai_horde/text/convert.md b/docs/horde_sdk/worker/dispatch/ai_horde/text/convert.md new file mode 100644 index 00000000..8a2278ac --- /dev/null +++ b/docs/horde_sdk/worker/dispatch/ai_horde/text/convert.md @@ -0,0 +1,2 @@ +# convert +::: horde_sdk.worker.dispatch.ai_horde.text.convert diff --git 
a/docs/horde_sdk/worker/dispatch/ai_horde_parameters.md b/docs/horde_sdk/worker/dispatch/ai_horde_parameters.md new file mode 100644 index 00000000..b313c295 --- /dev/null +++ b/docs/horde_sdk/worker/dispatch/ai_horde_parameters.md @@ -0,0 +1,2 @@ +# ai_horde_parameters +::: horde_sdk.worker.dispatch.ai_horde_parameters diff --git a/docs/horde_sdk/worker/dispatch/base.md b/docs/horde_sdk/worker/dispatch/base.md new file mode 100644 index 00000000..05330abb --- /dev/null +++ b/docs/horde_sdk/worker/dispatch/base.md @@ -0,0 +1,2 @@ +# base +::: horde_sdk.worker.dispatch.base diff --git a/docs/horde_sdk/worker/dispatch/pop_strategy.md b/docs/horde_sdk/worker/dispatch/pop_strategy.md new file mode 100644 index 00000000..f686791a --- /dev/null +++ b/docs/horde_sdk/worker/dispatch/pop_strategy.md @@ -0,0 +1,2 @@ +# pop_strategy +::: horde_sdk.worker.dispatch.pop_strategy diff --git a/docs/horde_sdk/worker/exceptions.md b/docs/horde_sdk/worker/exceptions.md new file mode 100644 index 00000000..723ae6e8 --- /dev/null +++ b/docs/horde_sdk/worker/exceptions.md @@ -0,0 +1,2 @@ +# exceptions +::: horde_sdk.worker.exceptions diff --git a/docs/horde_sdk/worker/feature_flags.md b/docs/horde_sdk/worker/feature_flags.md new file mode 100644 index 00000000..758d9305 --- /dev/null +++ b/docs/horde_sdk/worker/feature_flags.md @@ -0,0 +1,2 @@ +# feature_flags +::: horde_sdk.worker.feature_flags diff --git a/docs/horde_sdk/worker/generations.md b/docs/horde_sdk/worker/generations.md new file mode 100644 index 00000000..b9d9d73b --- /dev/null +++ b/docs/horde_sdk/worker/generations.md @@ -0,0 +1,2 @@ +# generations +::: horde_sdk.worker.generations diff --git a/docs/horde_sdk/worker/generations_base.md b/docs/horde_sdk/worker/generations_base.md new file mode 100644 index 00000000..be1be353 --- /dev/null +++ b/docs/horde_sdk/worker/generations_base.md @@ -0,0 +1,2 @@ +# generations_base +::: horde_sdk.worker.generations_base diff --git a/docs/horde_sdk/worker/job_base.md b/docs/horde_sdk/worker/job_base.md new file mode 100644 index 00000000..6bb841ce --- /dev/null +++ b/docs/horde_sdk/worker/job_base.md @@ -0,0 +1,2 @@ +# job_base +::: horde_sdk.worker.job_base diff --git a/docs/horde_sdk/worker/jobs.md b/docs/horde_sdk/worker/jobs.md new file mode 100644 index 00000000..d9919ed9 --- /dev/null +++ b/docs/horde_sdk/worker/jobs.md @@ -0,0 +1,2 @@ +# jobs +::: horde_sdk.worker.jobs diff --git a/docs/horde_sdk/ai_horde_worker/locale_info/.pages b/docs/horde_sdk/worker/locale_info/.pages similarity index 100% rename from docs/horde_sdk/ai_horde_worker/locale_info/.pages rename to docs/horde_sdk/worker/locale_info/.pages diff --git a/docs/horde_sdk/worker/locale_info/bridge_data_fields.md b/docs/horde_sdk/worker/locale_info/bridge_data_fields.md new file mode 100644 index 00000000..9e376555 --- /dev/null +++ b/docs/horde_sdk/worker/locale_info/bridge_data_fields.md @@ -0,0 +1,2 @@ +# bridge_data_fields +::: horde_sdk.worker.locale_info.bridge_data_fields diff --git a/docs/horde_sdk/worker/model_meta.md b/docs/horde_sdk/worker/model_meta.md new file mode 100644 index 00000000..cef07635 --- /dev/null +++ b/docs/horde_sdk/worker/model_meta.md @@ -0,0 +1,2 @@ +# model_meta +::: horde_sdk.worker.model_meta diff --git a/docs/horde_sdk/worker/worker.md b/docs/horde_sdk/worker/worker.md new file mode 100644 index 00000000..525a4f5f --- /dev/null +++ b/docs/horde_sdk/worker/worker.md @@ -0,0 +1,2 @@ +# worker +::: horde_sdk.worker.worker diff --git a/docs/horde_sdk/worker/worker_client.md 
b/docs/horde_sdk/worker/worker_client.md new file mode 100644 index 00000000..9f2bc57a --- /dev/null +++ b/docs/horde_sdk/worker/worker_client.md @@ -0,0 +1,2 @@ +# worker_client +::: horde_sdk.worker.worker_client diff --git a/docs/request_field_names_and_descriptions.json b/docs/request_field_names_and_descriptions.json deleted file mode 100644 index 02b6d755..00000000 --- a/docs/request_field_names_and_descriptions.json +++ /dev/null @@ -1,1545 +0,0 @@ -{ - "AIHordeGetPrivacyPolicyRequest": { - "format": { - "description": "The format of the document to return. Default is markdown.", - "types": [ - "horde_sdk.ai_horde_api.apimodels._documents.DocumentFormat", - "str" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "AIHordeGetSponsorsRequest": { - "format": { - "description": "The format of the document to return. Default is markdown.", - "types": [ - "horde_sdk.ai_horde_api.apimodels._documents.DocumentFormat", - "str" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "AIHordeGetTermsRequest": { - "format": { - "description": "The format of the document to return. Default is markdown.", - "types": [ - "horde_sdk.ai_horde_api.apimodels._documents.DocumentFormat", - "str" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "AIHordeHeartbeatRequest": { - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "AlchemyAsyncRequest": { - "apikey": { - "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - }, - "forms": { - "description": "The list of forms (types of post-processing/interrogation/captioning/etc) to request.", - "types": [ - "list[horde_sdk.ai_horde_api.apimodels.alchemy._async.AlchemyAsyncRequestFormItem]" - ] - }, - "source_image": { - "description": "The public URL of the source image or a base64 string to use.", - "types": [ - "str" - ] - }, - "slow_workers": { - "description": "Whether to use the slower workers. 
Costs additional kudos if `False`.", - "types": [ - "bool" - ] - }, - "extra_slow_workers": { - "description": "Whether to use the super slow workers.", - "types": [ - "bool" - ] - } - }, - "AlchemyDeleteRequest": { - "id_": { - "description": "The UUID for this job. Use this to post the results in the future.", - "types": [ - "horde_sdk.ai_horde_api.fields.JobID" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "AlchemyJobSubmitRequest": { - "apikey": { - "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "id_": { - "description": "The UUID for this job. Use this to post the results in the future.", - "types": [ - "horde_sdk.ai_horde_api.fields.JobID" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - }, - "result": { - "description": "The result of the alchemy job.", - "types": [ - "str" - ] - }, - "state": { - "description": "The state of this generation. See `GENERATION_STATE` for more information.", - "types": [ - "horde_sdk.ai_horde_api.consts.GENERATION_STATE" - ] - } - }, - "AlchemyPopRequest": { - "apikey": { - "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - }, - "name": { - "description": "The name of the request. This is used to identify the request in the logs.", - "types": [ - "str" - ] - }, - "priority_usernames": { - "description": "The usernames that should be prioritized for this request.", - "types": [ - "list[str]" - ] - }, - "forms": { - "description": "The types of alchemy that should be generated.", - "types": [ - "list[horde_sdk.ai_horde_api.consts.KNOWN_ALCHEMY_TYPES]" - ] - } - }, - "AlchemyStatusRequest": { - "apikey": { - "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "id_": { - "description": "The UUID for this job. Use this to post the results in the future.", - "types": [ - "horde_sdk.ai_horde_api.fields.JobID" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "AllWorkersDetailsRequest": { - "apikey": { - "description": "Defaults to `ANON_API_KEY`. 
See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - }, - "type_": { - "description": "Filter workers by type. Default is 'all' which returns all workers.", - "types": [ - "horde_sdk.ai_horde_api.consts.WORKER_TYPE" - ] - }, - "name": { - "description": "Returns a worker matching the exact name provided. Case insensitive.", - "types": [ - "str", - "None" - ] - } - }, - "DeleteImageGenerateRequest": { - "id_": { - "description": "The UUID for this job. Use this to post the results in the future.", - "types": [ - "horde_sdk.ai_horde_api.fields.JobID" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "DeleteTextGenerateRequest": { - "id_": { - "description": "The UUID for this job. Use this to post the results in the future.", - "types": [ - "horde_sdk.ai_horde_api.fields.JobID" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "DeleteWorkerRequest": { - "worker_id": { - "description": "The UUID of the worker in question for this request.", - "types": [ - "str", - "horde_sdk.ai_horde_api.fields.WorkerID" - ] - }, - "apikey": { - "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "FindUserRequest": { - "apikey": { - "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "HordePerformanceRequest": { - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "HordeStatusModelsAllRequest": { - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - }, - "type_": { - "description": "The type of model to filter by.", - "types": [ - "horde_sdk.ai_horde_api.consts.MODEL_TYPE" - ] - }, - "min_count": { - "description": "Filter only models that have at least this amount of threads serving.", - "types": [ - "int", - "None" - ] - }, - "max_count": { - "description": "Filter only models that have at most this amount of threads serving.", - "types": [ - "int", - "None" - ] - }, - "model_state": { - "description": "If 'known', only show stats for known models in the model reference. If 'custom' only show stats for custom\nmodels. If 'all' shows stats for all models.", - "types": [ - "horde_sdk.ai_horde_api.consts.MODEL_STATE" - ] - } - }, - "HordeStatusModelsSingleRequest": { - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - }, - "model_name": { - "description": "The name of the model to request.", - "types": [ - "str" - ] - } - }, - "ImageGenerateAsyncRequest": { - "trusted_workers": { - "description": "When true, only trusted workers will serve this request. When False, Evaluating workers will also be used\nwhich can increase speed but adds more risk!", - "types": [ - "bool" - ] - }, - "slow_workers": { - "description": "When True, allows slower workers to pick up this request. Disabling this incurs an extra kudos cost.", - "types": [ - "bool" - ] - }, - "extra_slow_workers": { - "description": "When True, allows extra slow workers to pick up this request.", - "types": [ - "bool" - ] - }, - "workers": { - "description": "A list of worker IDs to use for this request. If empty, any worker can pick up the request. Using this incurs\nand extra kudos cost.", - "types": [ - "list[str]" - ] - }, - "worker_blacklist": { - "description": "If true, the worker list will be treated as a blacklist instead of a whitelist.", - "types": [ - "list[str]" - ] - }, - "models": { - "description": "The generative models to use for this request.", - "types": [ - "list[str]" - ] - }, - "dry_run": { - "description": "If true, the request will not be processed, but will return a response with the estimated kudos cost.", - "types": [ - "bool" - ] - }, - "apikey": { - "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - }, - "prompt": { - "description": "The prompt which will be sent to Stable Diffusion to generate an image.", - "types": [ - "str" - ] - }, - "params": { - "description": "The parameters for the image generation.", - "types": [ - "horde_sdk.ai_horde_api.apimodels.generate._async.ImageGenerationInputPayload", - "None" - ] - }, - "nsfw": { - "description": "Set to true if this request is NSFW. 
This will skip workers which censor images.", - "types": [ - "bool", - "None" - ] - }, - "censor_nsfw": { - "description": "If the request is SFW, and the worker accidentally generates NSFW, it will send back a censored image.", - "types": [ - "bool" - ] - }, - "r2": { - "description": "If True, the image will be sent via cloudflare r2 download link.", - "types": [ - "bool" - ] - }, - "shared": { - "description": "If True, The image will be shared with LAION for improving their dataset. This will also reduce your\nkudos consumption by 2. For anonymous users, this is always True.", - "types": [ - "bool" - ] - }, - "replacement_filter": { - "description": "If enabled, suspicious prompts are sanitized through a string replacement filter instead.", - "types": [ - "bool" - ] - }, - "source_image": { - "description": "The public URL of the source image or a base64 string to use.", - "types": [ - "str", - "None" - ] - }, - "source_processing": { - "description": "If source_image is provided, specifies how to process it.", - "types": [ - "horde_sdk.ai_horde_api.consts.KNOWN_SOURCE_PROCESSING" - ] - }, - "source_mask": { - "description": "If source_processing is set to 'inpainting' or 'outpainting', this parameter can be optionally provided as the\nBase64-encoded webp mask of the areas to inpaint. If this arg is not passed, the inpainting/outpainting mask has to\nbe embedded as alpha channel.", - "types": [ - "str", - "None" - ] - }, - "extra_source_images": { - "description": "Additional uploaded images which can be used for further operations.", - "types": [ - "list[horde_sdk.ai_horde_api.apimodels.base.ExtraSourceImageEntry]", - "None" - ] - } - }, - "ImageGenerateCheckRequest": { - "id_": { - "description": "The UUID for this job. Use this to post the results in the future.", - "types": [ - "horde_sdk.ai_horde_api.fields.JobID" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "ImageGenerateJobPopRequest": { - "amount": { - "description": "The number of jobs to pop at the same time.", - "types": [ - "int", - "None" - ] - }, - "bridge_agent": { - "description": "The worker name, version and website.", - "types": [ - "str", - "None" - ] - }, - "models": { - "description": "The models this worker can generate.", - "types": [ - "list[str]" - ] - }, - "name": { - "description": "The Name of the Worker.", - "types": [ - "str" - ] - }, - "nsfw": { - "description": "Whether this worker can generate NSFW requests or not.", - "types": [ - "bool", - "None" - ] - }, - "priority_usernames": { - "description": "The usernames that should be prioritized by this worker.", - "types": [ - "list[str]", - "None" - ] - }, - "require_upfront_kudos": { - "description": "If True, this worker will only pick up requests where the owner has the required kudos to consume already available.", - "types": [ - "bool", - "None" - ] - }, - "threads": { - "description": "How many threads this worker is running. This is used to accurately the current power available in the horde.", - "types": [ - "int", - "None" - ] - }, - "apikey": { - "description": "Defaults to `ANON_API_KEY`. 
See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - }, - "bridge_version": { - "description": "The version of the bridge this worker is running.", - "types": [ - "int", - "None" - ] - }, - "max_pixels": { - "description": "The maximum number of pixels this worker can generate.", - "types": [ - "int" - ] - }, - "blacklist": { - "description": "The list of words this worker will not accept in a prompt.", - "types": [ - "list[str]" - ] - }, - "allow_img2img": { - "description": "Whether this worker can generate img2img.", - "types": [ - "bool" - ] - }, - "allow_painting": { - "description": "Whether this worker can generate inpainting/outpainting.", - "types": [ - "bool" - ] - }, - "allow_unsafe_ipaddr": { - "description": "Whether this worker will generate from unsafe/VPN IP addresses.", - "types": [ - "bool" - ] - }, - "allow_post_processing": { - "description": "Whether this worker can do post-processing.", - "types": [ - "bool" - ] - }, - "allow_controlnet": { - "description": "Whether this worker can generate using controlnets.", - "types": [ - "bool" - ] - }, - "allow_sdxl_controlnet": { - "description": "Whether this worker can generate using SDXL controlnets.", - "types": [ - "bool" - ] - }, - "allow_lora": { - "description": "Whether this worker can generate using Loras.", - "types": [ - "bool" - ] - }, - "extra_slow_worker": { - "description": "Marks the worker as extra slow.", - "types": [ - "bool" - ] - }, - "limit_max_steps": { - "description": "Prevents the worker picking up jobs with more steps than the model average.", - "types": [ - "bool" - ] - } - }, - "ImageGenerateStatusRequest": { - "id_": { - "description": "The UUID for this job. Use this to post the results in the future.", - "types": [ - "horde_sdk.ai_horde_api.fields.JobID" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "ImageGenerationJobSubmitRequest": { - "apikey": { - "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "id_": { - "description": "The UUID for this job. Use this to post the results in the future.", - "types": [ - "horde_sdk.ai_horde_api.fields.JobID" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - }, - "generation": { - "description": "R2 result was uploaded to R2, else the string of the result.", - "types": [ - "str" - ] - }, - "state": { - "description": "The state of this generation.", - "types": [ - "horde_sdk.ai_horde_api.consts.GENERATION_STATE" - ] - }, - "seed": { - "description": "The seed for this generation.", - "types": [ - "int" - ] - }, - "censored": { - "description": "If True, this resulting image has been censored.", - "types": [ - "bool" - ] - }, - "gen_metadata": { - "description": "Extra metadata about faulted or defaulted components of the generation", - "types": [ - "list[horde_sdk.ai_horde_api.apimodels.base.GenMetadataEntry]", - "None" - ] - } - }, - "ImageStatsModelsRequest": { - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - }, - "model_state": { - "description": "The state of the models to get stats for. Known models are models that are known to the system.", - "types": [ - "horde_sdk.ai_horde_api.consts.MODEL_STATE" - ] - } - }, - "ImageStatsModelsTotalRequest": { - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "KudosTransferRequest": { - "apikey": { - "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - }, - "username": { - "description": "The username of the user to transfer Kudos to.", - "types": [ - "str" - ] - }, - "amount": { - "description": "The amount of Kudos to transfer.", - "types": [ - "float" - ] - } - }, - "ListUsersDetailsRequest": { - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - }, - "page": { - "description": "The page number to request. There are up to 25 users per page.", - "types": [ - "int" - ] - }, - "sort": { - "description": "The field to sort the users by. The default is by kudos.", - "types": [ - "str" - ] - } - }, - "ModifyUserRequest": { - "apikey": { - "description": "Defaults to `ANON_API_KEY`. 
See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "admin_comment": { - "description": "Add further information about this user for the other admins.", - "types": [ - "str", - "None" - ] - }, - "concurrency": { - "description": "The amount of concurrent request this user can have.", - "types": [ - "int", - "None" - ] - }, - "contact": { - "description": "Contact details for the horde admins to reach the user in case of emergency. This is only visible to horde\nmoderators.", - "types": [ - "str", - "None" - ] - }, - "customizer": { - "description": "When set to true, the user will be able to serve custom Stable Diffusion models which do not exist in the\nOfficial AI Horde Model Reference.", - "types": [ - "bool", - "None" - ] - }, - "education": { - "description": "When set to true, the user is considered an education account and some options become more restrictive.", - "types": [ - "bool", - "None" - ] - }, - "filtered": { - "description": "When set to true, the replacement filter will always be applied against this user", - "types": [ - "bool", - "None" - ] - }, - "flagged": { - "description": "When set to true, the user cannot transfer kudos and all their workers are put into permanent maintenance.", - "types": [ - "bool", - "None" - ] - }, - "moderator": { - "description": "Set to true to make this user a horde moderator.", - "types": [ - "bool", - "None" - ] - }, - "monthly_kudos": { - "description": "When specified, will start assigning the user monthly kudos, starting now!", - "types": [ - "int", - "None" - ] - }, - "public_workers": { - "description": "Set to true to make this user display their worker IDs.", - "types": [ - "bool", - "None" - ] - }, - "service": { - "description": "When set to true, the user is considered a service account proxying the requests for other users.", - "types": [ - "bool", - "None" - ] - }, - "special": { - "description": "When set to true, The user can send special payloads.", - "types": [ - "bool", - "None" - ] - }, - "trusted": { - "description": "When set to true,the user and their servers will not be affected by suspicion.", - "types": [ - "bool", - "None" - ] - }, - "usage_multiplier": { - "description": "The amount by which to multiply the users kudos consumption.", - "types": [ - "float", - "None" - ] - }, - "username": { - "description": "When specified, will change the username. No profanity allowed!", - "types": [ - "str", - "None" - ] - }, - "vpn": { - "description": "When set to true, the user will be able to onboard workers behind a VPN. This should be used as a temporary\nsolution until the user is trusted.", - "types": [ - "bool", - "None" - ] - }, - "worker_invited": { - "description": "Set to the amount of workers this user is allowed to join to the horde when in worker invite-only mode.", - "types": [ - "int", - "None" - ] - }, - "kudos": { - "description": "The amount of kudos to modify (can be negative).", - "types": [ - "float", - "None" - ] - }, - "reset_suspicion": { - "description": "Set the user's suspicion back to 0.", - "types": [ - "bool", - "None" - ] - }, - "user_id": { - "description": "The user's ID, as a `str`, but only containing numeric values.", - "types": [ - "str" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "ModifyWorkerRequest": { - "worker_id": { - "description": "The UUID of the worker in question for this request.", - "types": [ - "str", - "horde_sdk.ai_horde_api.fields.WorkerID" - ] - }, - "apikey": { - "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - }, - "info": { - "description": "You can optionally provide a server note which will be seen in the server details. No profanity allowed!", - "types": [ - "str", - "None" - ] - }, - "maintenance": { - "description": "Set to true to put this worker into maintenance.", - "types": [ - "bool", - "None" - ] - }, - "maintenance_msg": { - "description": "If maintenance is True, you can optionally provide a message to be used instead of the default maintenance\nmessage, so that the owner is informed.", - "types": [ - "str", - "None" - ] - }, - "name": { - "description": "When this is set, it will change the worker's name. No profanity allowed!", - "types": [ - "str", - "None" - ] - }, - "paused": { - "description": "(Mods only) Set to true to pause this worker.", - "types": [ - "bool", - "None" - ] - }, - "team": { - "description": "The team towards which this worker contributes kudos. It an empty string ('') is passed, it will leave the", - "types": [ - "str", - "None" - ] - } - }, - "NewsRequest": { - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "SingleUserDetailsRequest": { - "user_id": { - "description": "The user's ID, as a `str`, but only containing numeric values.", - "types": [ - "str" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "SingleWorkerDetailsRequest": { - "apikey": { - "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "worker_id": { - "description": "The UUID of the worker in question for this request.", - "types": [ - "str", - "horde_sdk.ai_horde_api.fields.WorkerID" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "SingleWorkerNameDetailsRequest": { - "apikey": { - "description": "Defaults to `ANON_API_KEY`. 
See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "worker_name": { - "description": "The name of the worker in question for this request.", - "types": [ - "str" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "TextGenerateAsyncRequest": { - "trusted_workers": { - "description": "When true, only trusted workers will serve this request. When False, Evaluating workers will also be used\nwhich can increase speed but adds more risk!", - "types": [ - "bool" - ] - }, - "slow_workers": { - "description": "When True, allows slower workers to pick up this request. Disabling this incurs an extra kudos cost.", - "types": [ - "bool" - ] - }, - "extra_slow_workers": { - "description": "When True, allows extra slow workers to pick up this request.", - "types": [ - "bool" - ] - }, - "workers": { - "description": "A list of worker IDs to use for this request. If empty, any worker can pick up the request. Using this incurs\nand extra kudos cost.", - "types": [ - "list[str]" - ] - }, - "worker_blacklist": { - "description": "If true, the worker list will be treated as a blacklist instead of a whitelist.", - "types": [ - "list[str]" - ] - }, - "models": { - "description": "The generative models to use for this request.", - "types": [ - "list[str]" - ] - }, - "dry_run": { - "description": "If true, the request will not be processed, but will return a response with the estimated kudos cost.", - "types": [ - "bool" - ] - }, - "apikey": { - "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - }, - "params": { - "description": "The parameters to use for the generation.", - "types": [ - "horde_sdk.ai_horde_api.apimodels.generate.text._async.ModelGenerationInputKobold", - "None" - ] - }, - "prompt": { - "description": "The prompt which will be sent to KoboldAI to generate text.", - "types": [ - "str", - "None" - ] - }, - "allow_downgrade": { - "description": "When true and the request requires upfront kudos and the account does not have enough The request will be\ndowngraded in max context and max tokens so that it does not need upfront kudos.", - "types": [ - "bool", - "None" - ] - }, - "disable_batching": { - "description": "When true, This request will not use batching. 
This will allow you to retrieve accurate seeds.\nFeature is restricted to Trusted users and Patreons.", - "types": [ - "bool", - "None" - ] - }, - "extra_source_images": { - "description": "Any extra source images that should be used for this request; e.g., for multi-modal models.", - "types": [ - "list[horde_sdk.ai_horde_api.apimodels.base.ExtraSourceImageEntry]", - "None" - ] - }, - "proxied_account": { - "description": "If using a service account as a proxy, provide this value to identify the actual account from which this\nrequest is coming from.", - "types": [ - "str", - "None" - ] - }, - "softprompt": { - "description": "Specify which softprompt needs to be used to service this request.", - "types": [ - "str", - "None" - ] - }, - "webhook": { - "description": "Provide a URL where the AI Horde will send a POST call after each delivered generation.\nThe request will include the details of the job as well as the request ID.", - "types": [ - "str", - "None" - ] - } - }, - "TextGenerateJobPopRequest": { - "amount": { - "description": "The number of jobs to pop at the same time.", - "types": [ - "int", - "None" - ] - }, - "bridge_agent": { - "description": "The worker name, version and website.", - "types": [ - "str", - "None" - ] - }, - "models": { - "description": "The models this worker can generate.", - "types": [ - "list[str]" - ] - }, - "name": { - "description": "The Name of the Worker.", - "types": [ - "str" - ] - }, - "nsfw": { - "description": "Whether this worker can generate NSFW requests or not.", - "types": [ - "bool", - "None" - ] - }, - "priority_usernames": { - "description": "The usernames that should be prioritized by this worker.", - "types": [ - "list[str]", - "None" - ] - }, - "require_upfront_kudos": { - "description": "If True, this worker will only pick up requests where the owner has the required kudos to consume already available.", - "types": [ - "bool", - "None" - ] - }, - "threads": { - "description": "How many threads this worker is running. This is used to accurately the current power available in the horde.", - "types": [ - "int", - "None" - ] - }, - "max_length": { - "description": "The maximum amount of tokens this worker can generate.", - "types": [ - "int" - ] - }, - "max_context_length": { - "description": "The max amount of context to submit to this AI for sampling.", - "types": [ - "int" - ] - }, - "softprompts": { - "description": "The available softprompt files on this worker for the currently running model.", - "types": [ - "list[str]", - "None" - ] - }, - "apikey": { - "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "TextGenerateStatusRequest": { - "id_": { - "description": "The UUID for this job. Use this to post the results in the future.", - "types": [ - "horde_sdk.ai_horde_api.fields.JobID" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "TextGenerationJobSubmitRequest": { - "apikey": { - "description": "Defaults to `ANON_API_KEY`. See also `.is_api_key_required()`", - "types": [ - "str", - "None" - ] - }, - "id_": { - "description": "The UUID for this job. Use this to post the results in the future.", - "types": [ - "horde_sdk.ai_horde_api.fields.JobID" - ] - }, - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - }, - "generation": { - "description": "R2 result was uploaded to R2, else the string of the result.", - "types": [ - "str" - ] - }, - "state": { - "description": "The state of this generation.", - "types": [ - "horde_sdk.ai_horde_api.consts.GENERATION_STATE" - ] - }, - "gen_metadata": { - "description": "Extra metadata about faulted or defaulted components of the generation", - "types": [ - "list[horde_sdk.ai_horde_api.apimodels.base.GenMetadataEntry]", - "None" - ] - } - }, - "TextStatsModelsRequest": { - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - }, - "TextStatsModelsTotalRequest": { - "accept": { - "description": "The 'accept' header field.", - "types": [ - "horde_sdk.generic_api.metadata.GenericAcceptTypes" - ] - }, - "client_agent": { - "description": "The requesting client's agent. 
You should set this to reflect the name, version and contact information\nfor your client.", - "types": [ - "str" - ] - } - } -} diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css index 4324dd10..c4c2cb85 100644 --- a/docs/stylesheets/extra.css +++ b/docs/stylesheets/extra.css @@ -1,3 +1,6 @@ +.md-grid { + max-width: 73rem; +} .doc-object-name { font-weight: bold; letter-spacing: 0.0em; diff --git a/docs/worker/base_transitions.yaml b/docs/worker/base_transitions.yaml new file mode 100644 index 00000000..21c850a3 --- /dev/null +++ b/docs/worker/base_transitions.yaml @@ -0,0 +1,52 @@ +NOT_STARTED: +- PRELOADING +- GENERATING +- PENDING_POST_PROCESSING +- POST_PROCESSING +- ERROR +PRELOADING: +- PRELOADING_COMPLETE +- ERROR +PRELOADING_COMPLETE: +- GENERATING +- PENDING_POST_PROCESSING +- POST_PROCESSING +- ERROR +GENERATING: +- PENDING_POST_PROCESSING +- POST_PROCESSING +- PENDING_SAFETY_CHECK +- SAFETY_CHECKING +- ERROR +PENDING_POST_PROCESSING: +- POST_PROCESSING +- ERROR +POST_PROCESSING: +- PENDING_SAFETY_CHECK +- SAFETY_CHECKING +- ERROR +PENDING_SAFETY_CHECK: +- SAFETY_CHECKING +- ERROR +SAFETY_CHECKING: +- PENDING_SUBMIT +- ERROR +PENDING_SUBMIT: +- SUBMITTING +- ERROR +SUBMITTING: +- SUBMIT_COMPLETE +- ERROR +SUBMIT_COMPLETE: +- COMPLETE +COMPLETE: [] +ABORTED: +- REPORTED_FAILED +- ERROR +REPORTED_FAILED: [] +ERROR: +- ABORTED +USER_REQUESTED_ABORT: +- USER_ABORT_COMPLETE +USER_ABORT_COMPLETE: [] +ABANDONED: [] diff --git a/docs/worker/black_box_transitions.yaml b/docs/worker/black_box_transitions.yaml new file mode 100644 index 00000000..5ff86fdf --- /dev/null +++ b/docs/worker/black_box_transitions.yaml @@ -0,0 +1,36 @@ +NOT_STARTED: +- GENERATING +- ERROR +GENERATING: +- PENDING_SUBMIT +- PENDING_SAFETY_CHECK +- COMPLETE +- ERROR +PENDING_SAFETY_CHECK: +- SAFETY_CHECKING +- ERROR +SAFETY_CHECKING: +- PENDING_SUBMIT +- COMPLETE +- ERROR +PENDING_SUBMIT: +- SUBMITTING +- ERROR +SUBMITTING: +- SUBMIT_COMPLETE +- COMPLETE +- ERROR +SUBMIT_COMPLETE: +- COMPLETE +COMPLETE: [] +ABORTED: +- REPORTED_FAILED +- ERROR +REPORTED_FAILED: [] +ERROR: +- ABORTED +USER_REQUESTED_ABORT: +- USER_ABORT_COMPLETE +- ABANDONED +USER_ABORT_COMPLETE: [] +ABANDONED: [] diff --git a/docs/worker/generation_parameters.md b/docs/worker/generation_parameters.md new file mode 100644 index 00000000..bdf00d1e --- /dev/null +++ b/docs/worker/generation_parameters.md @@ -0,0 +1,97 @@ +# Generation Parameters + +In `horde-sdk`, **generation parameters** are structured objects that define the configuration and options for a single round of inference or post-processing, such as generating an image or running an alchemy operation. These parameters are designed to be flexible, extensible, and type-safe, supporting a wide variety of generation workflows. + +## Object Hierarchy and Template Classes + +The generation parameter system is built around a hierarchy of Pydantic models. At the core of this design is the use of **template classes** — base models whose fields are all optional (type hinted in the code as `| None`). These template classes provide a flexible foundation for representing partial or evolving parameter sets. More concrete subclasses then override or specialize these fields, marking them as required or setting defaults as appropriate for specific use cases. + +### Why Template Classes? + +The primary reason for using template classes is to allow for **partial specification** of parameters. 
This is specifically required for the "Chain Flows" (dynamic workflows) in the horde-sdk, where it may not be possible to set a given field to a reasonable default ahead of time. For example, "prompt" is always required for image generation, but Chain Flows can be configured to use a prompt generated from a previous step, which may not be known at the time of parameter construction. By using a template class, we can enjoy the flexibility of having all fields optional, while still being able to enforce requirements in concrete subclasses by using Pydantic's validation features. + +## Core Classes + +### `ComposedParameterSetBase` + +This is the base for all classes which contain **all** values required by the backend to execute a generation. These classes may further contain classes which are children of `GenerationParameterComponentBase`, which represent generation components such as LoRas, Controlnets, or other specialized processing steps which are self-contained and have several parameters of their own. + +### `GenerationParameterComponentBase` + +Implementations of this class represent groups of parameters which pertain to a specific auxiliary processing step or component in the generation workflow. This can include features like LoRas, but may also include back-end specific sets of parameters or groups of closely related parameters. These classes may also be used when a parameter requires a list of a multiple related values. + +### `BasicModelGenerationParameters` + +A child class of `GenerationParameterComponentBase`, this class is meant to be a base for all model-based generation parameters. It includes fields that are common across different types of generation, such as `model`, `model_baseline`, `model_filename` and `model_hash`. Any generative process that requires a model should inherit from this class to ensure that these common fields are present. + +Processes (still called "generations" in the code) that do not require a model + +### `BasicImageGenerationParametersTemplate` + +This is the template class for image generation parameters. All fields are optional, allowing for flexible construction and partial validation. + +> Note: This implementation differs from the actual horde-sdk, and is provided here for illustrative purposes. The actual implementation may have additional fields or different defaults. + +```python +class BasicImageGenerationParametersTemplate(BasicModelGenerationParameters): + prompt: str | None = None + seed: str | None = None + height: int | None = Field(default=None, multiple_of=64, ge=64) + width: int | None = Field(default=None, multiple_of=64, ge=64) + steps: int | None = Field(default=None, ge=1) + cfg_scale: float | None = Field(default=None, ge=0) + sampler_name: KNOWN_IMAGE_SAMPLERS | str | None = Field(default=None) + scheduler: KNOWN_IMAGE_SCHEDULERS | str | None = Field(default=None) + clip_skip: int | None = Field(default=None) + clip_skip_representation: CLIP_SKIP_REPRESENTATION | None = Field(default=None) + denoising_strength: float | None = Field(default=None, ge=0, le=1) +``` + +### `BasicImageGenerationParameters` + +This is a concrete subclass of the template, making certain fields required and setting sensible defaults. It represents the minimum viable set of parameters for an image generation. 
+ +```python +class BasicImageGenerationParameters(BasicImageGenerationParametersTemplate): + prompt: str + height: int | None = Field(default=DEFAULT_BASELINE_RESOLUTION, multiple_of=64, ge=64) + width: int | None = Field(default=DEFAULT_BASELINE_RESOLUTION, multiple_of=64, ge=64) + clip_skip_representation: CLIP_SKIP_REPRESENTATION | None = Field(default=CLIP_SKIP_REPRESENTATION.NEGATIVE_OFFSET) +``` + +### 3. Composed and Component Classes + +- **ComposedParameterSetBase:** Used for parameter sets that combine multiple components. +- **GenerationParameterComponentBase:** Base class for parameter components (e.g., Controlnet, Remix, HiresFix). + +#### Example: Image Generation Parameters + +```python +class ImageGenerationParametersTemplate(ComposedParameterSetBase): + batch_size: int | None = Field(default=None, ge=1) + tiling: bool | None = None + source_processing: KNOWN_IMAGE_SOURCE_PROCESSING | str | None = None + base_params: BasicImageGenerationParameters | None = None + additional_params: list[GenerationParameterComponentBase] | None = None + # ...other fields... +``` + +Concrete implementation: + +```python +class ImageGenerationParameters(ImageGenerationParametersTemplate): + result_ids: list[ID_TYPES] + base_params: BasicImageGenerationParameters + batch_size: int | None = Field(default=1, ge=1) + # ...validators to enforce required fields... +``` + +## Feature Flags + +Feature flags are used to describe the capabilities or requirements of a worker or generation. These are modeled as Pydantic classes, such as: + +- `GenerationFeatureFlags` (base) +- `ImageGenerationFeatureFlags` (image-specific) +- `ControlnetFeatureFlags` (controlnet-specific) + +These classes help communicate what features are available or required for a given generation. diff --git a/docs/worker/generation_states_flow.md b/docs/worker/generation_states_flow.md new file mode 100644 index 00000000..b94a68a9 --- /dev/null +++ b/docs/worker/generation_states_flow.md @@ -0,0 +1,194 @@ +# Generation States Flow + +## Typical States Flow + +This is visual depiction of the `base_generate_progress_transitions` map found in `horde_sdk/ai_horde_worker/consts.py`. + +You should also see the [worker loop](../haidra-assets/docs/worker_loop.md) and [job lifecycle explanation](../haidra-assets/docs/workers.md) for additional details. + +```mermaid +--- +title: Worker States Flow (without error states) +--- +flowchart TD + + NOT_STARTED@{shape: subproc} + PRELOADING@{shape: subproc} + PRELOADING_COMPLETE@{shape: subproc} + PENDING_POST_PROCESSING@{ shape: subproc} + POST_PROCESSING@{ shape: subproc} + GENERATING@{ shape: subproc} + PENDING_SAFETY_CHECK@{ shape: subproc} + SAFETY_CHECKING@{ shape: subproc} + PENDING_SUBMIT@{ shape: subproc} + SUBMITTING@{ shape: subproc} + SUBMIT_COMPLETE@{ shape: subproc} + COMPLETE@{ shape: subproc} + + NOT_STARTED --> D1 + D1@{label: "Is preloading required?" } + D2@{label: "Is generation required?" } + D3@{label: "Is post-processing required?" } + D1 -->|Yes| PRELOADING + D1 -->|No| D2 + D2 -->|Yes| GENERATING + D2 -->|No| D3 + D3 -->|Yes| PENDING_POST_PROCESSING + D3 -->|No| PENDING_SAFETY_CHECK + + + PRELOADING --> PRELOADING_COMPLETE + + PRELOADING_COMPLETE --> D2 + + GENERATING --> D3 + + PENDING_POST_PROCESSING--> POST_PROCESSING + + POST_PROCESSING --> D11@{label: "Is safety check required?" } + D11 -->|Yes| PENDING_SAFETY_CHECK + D11 -->|No| D12@{label: "Is submit required?" 
} + + PENDING_SAFETY_CHECK --> SAFETY_CHECKING + + SAFETY_CHECKING --> D12 + D12 -->|Yes| PENDING_SUBMIT + D12 -->|No| COMPLETE + + PENDING_SUBMIT --> SUBMITTING + + SUBMITTING --> SUBMIT_COMPLETE + + SUBMIT_COMPLETE --> COMPLETE + + linkStyle 1 stroke:#27ae60,stroke-width:3px,color:#27ae60 + linkStyle 2 stroke:#e74c3c,stroke-width:3px + linkStyle 3 stroke:#27ae60,stroke-width:3px,color:#27ae60 + linkStyle 4 stroke:#e74c3c,stroke-width:3px + linkStyle 5 stroke:#27ae60,stroke-width:3px,color:#27ae60 + linkStyle 6 stroke:#e74c3c,stroke-width:3px + linkStyle 12 stroke:#27ae60,stroke-width:3px,color:#27ae60 + linkStyle 13 stroke:#e74c3c,stroke-width:3px + linkStyle 16 stroke:#27ae60,stroke-width:3px,color:#27ae60 + linkStyle 17 stroke:#e74c3c,stroke-width:3px + +``` + +--- + +```mermaid +--- +title: Worker Error States Flow +--- + +flowchart TD + style ERROR stroke-dasharray: 5 5 + style ABORTED stroke-dasharray: 5 5 + style USER_REQUESTED_ABORT stroke-dasharray: 5 5 + + ERROR["`Error + (_valid from all states_)`"] --> ABORTED + ERROR --> ABANDONED + + ABORTED["`Aborted + (_valid from all states_)`"] --> REPORTED_FAILED + ABORTED --> ERROR + + USER_REQUESTED_ABORT["`User Requested Abort + (_valid from all states_)`"] --> USER_ABORT_COMPLETE + + REPORTED_FAILED + + USER_ABORT_COMPLETE + + ABANDONED + +``` + +--- + +`ERROR`, `ABORTED` and `USER_REQUESTED_ABORT` states are always valid to transition to. If transitioning to `ERROR`, it is **only** permissible to transition to the state from which the error occurred, or to `ABORTED`. If transitioning to `ABORTED`, it is only permissible to transition to `REPORTED_FAILED` or `USER_REQUESTED_ABORT`. + +Consider the following good and bad examples of error transitions: + +Good: + +- `NOT_STARTED` -> `PRELOADING` -> `ERROR` -> `PRELOADING` -> `PRELOADING_COMPLETE` -> ... + - In this case, the error occurred during preloading, and the worker was able to recover and continue. +- `NOT_STARTED` -> `PRELOADING` -> `ERROR` -> `PRELOADING` -> `ERROR` -> `ABORTED` -> `REPORTED_FAILED` + - Here, the worker encountered an error during preloading, attempted to recover, but failed again and then aborted the job. Note that you can set the intended number of retries in worker job configuration. See the `HordeWorkerJobConfig` class and the `state_error_limits` arg in a generation class constructor for more details. +- `NOT_STARTED` -> `PRELOADING` -> `USER_REQUESTED_ABORT` -> `USER_ABORT_COMPLETE` + - In this case, the user who created the job requested an abort, and the worker was able to complete the abort process successfully. + +Bad: + +- `NOT_STARTED` -> `PRELOADING` -> `ERROR` -> `GENERATING` + - If an error occurs, you have to explicitly handle it and you must transition *back* to the state from which the error occurred, or to `ABORTED`. In this case, the worker is trying to continue generating after an error occurred during preloading, which is not allowed. The correct transition would be to go back to `PRELOADING` or to `ABORTED`. +- `NOT_STARTED` -> `PRELOADING` -> `ERROR` -> `ERROR` + - This is not allowed, as you cannot transition to `ERROR` from `ERROR`. You must handle the error and transition to a valid state, such as `ABORTED` or back to the state from which the error occurred. If this situation occurs to you frequently, you will need to review your flow and control to ensure that errors and exceptions are handled properly. 
Consider checking the current state before transitioning to `ERROR` and if it is already `ERROR` consider logging the error and aborting the job instead. + +## Black Box States Flow + +Depending on the worker backend, it may not always be possible to track all of the states. For example, it may be that the backend silently handles `PRELOADING` without a callback or hook to detect that it has started or completed. Further, some backends may ever only support a single model, so `PRELOADING` may not be applicable at all. In these cases, it is appropriate to use `black_box_mode` for these `HordeSingleGeneration` class instances. + +In this case, the flow is simplified to the following (where safety checks, even if required, are also an optional state) + +--- + +```mermaid +--- +title: Black Box States (not including error states) +--- +flowchart TD + NOT_STARTED --> GENERATING + + GENERATING --> PENDING_SUBMIT + GENERATING --> PENDING_SAFETY_CHECK + GENERATING --> COMPLETE + + PENDING_SAFETY_CHECK --> SAFETY_CHECKING + + SAFETY_CHECKING --> PENDING_SUBMIT + SAFETY_CHECKING --> COMPLETE + + PENDING_SUBMIT --> SUBMITTING + + SUBMITTING --> SUBMIT_COMPLETE + SUBMITTING --> COMPLETE + + SUBMIT_COMPLETE --> COMPLETE +``` + +--- + +```mermaid +--- +title: Black Box Error States +--- + +flowchart TD + style ERROR stroke-dasharray: 5 5 + style ABORTED stroke-dasharray: 5 5 + style USER_REQUESTED_ABORT stroke-dasharray: 5 5 + + ERROR["`Error + (_valid from all states_)`"] --> ABORTED + ERROR --> ABANDONED + + ABORTED --> REPORTED_FAILED + ABORTED --> ABANDONED + + USER_REQUESTED_ABORT["`User Requested Abort + (_valid from all states_)`"] --> USER_ABORT_COMPLETE + USER_REQUESTED_ABORT --> ABANDONED + + USER_ABORT_COMPLETE + + REPORTED_FAILED + + ABANDONED +``` + +--- + +Note that a generation may still require additional steps, such as post-processing or safety checking, but it is assumed that these steps are handled internally by the backend and do not require explicit state transitions in the worker. The worker will still report the final state as `COMPLETE` or `FAILED` based on the outcome of the generation. It is incumbent on the implementor to ensure that these steps have happened as intended. diff --git a/docs/worker/generations.md b/docs/worker/generations.md new file mode 100644 index 00000000..af88942f --- /dev/null +++ b/docs/worker/generations.md @@ -0,0 +1,526 @@ +# Generations + +In horde-sdk, **generations** are discrete units of work representing a single round of inference or post-processing, such as generating an image, producing text, or running an alchemy operation. Generations are not the same as jobs — a job may consist of multiple generations and contains additional metadata specific to the dispatch source. **Workers** are the entities that execute these generations, managing their lifecycle and state transitions. + +See also the [relevant definitions](../definitions.md#generation). + +## The `HordeSingleGeneration` Class: Core Abstraction + +At the heart of the generation workflow is the `HordeSingleGeneration` class. This is an abstract base class that provides a **state machine** for managing the lifecycle of a generation. All specific generation types (e.g., image, text) inherit from this class, ensuring a consistent interface and behavior across the SDK. + +Note that `HordeSingleGeneration` and its subclasses are *representations of state* and do not perform any actual inference or post-processing. 
They are designed to be used in conjunction with or by worker code that handles the execution of the generation logic, typically by using backend software (such as ComfyUI or KoboldCPP) to perform the actual work. + +### Purpose + +- **Lifecycle Management:** Tracks the progress of a generation from start to finish, enforcing valid state transitions (e.g., from "not started" to "generating", "safety checking", "complete", etc.). +- **Error Handling:** Provides mechanisms to handle errors, aborts, and recovery, with configurable limits on error states. +- **Extensibility:** Designed to be subclassed for specific generation types (e.g., `ImageSingleGeneration`, `TextSingleGeneration`), allowing custom logic while retaining core state management. +- **Standardized Interface:** Ensures that all generations follow a common pattern, making it easier to implement and maintain different types of generations for consistent behavior and improved observability. + +### Key Elements + +- **State Machine:** The class enforces a strict sequence of method calls, representing each step in the generation process. States are defined by the `GENERATION_PROGRESS` enum, and transitions are validated to prevent invalid workflows. +- **Thread Safety:** Internal state changes are protected by locks, making the class safe for use in concurrent environments. +- **Callbacks:** You can register callbacks to be notified when the generation enters specific states. +- **Batch and Result Tracking:** Each generation tracks its unique ID, batch size, and result IDs, making it easy to manage multiple outputs. +- **Logging:** Extra logging can be enabled for debugging and tracing state transitions. + +### How to Use + +1. **Subclass or Use a Concrete Implementation:** You typically use a subclass like `ImageSingleGeneration` or `TextSingleGeneration`, which inherit from `HordeSingleGeneration` and add type-specific logic. +2. **Initialize with Parameters:** Pass in generation parameters, result type, and optional safety rules. The class will validate your setup and prepare the state machine. +3. **Follow the State Sequence:** Call the appropriate methods in order (e.g., `on_preloading()`, `on_generating()`, `set_work_result()`, `on_safety_checking()`, etc.). The class will enforce correct transitions and raise errors if steps are skipped or repeated incorrectly. +4. **Handle Results and Errors:** Inspect results, safety checks, and use error handling methods (`on_error()`, `on_abort()`) as needed. + +## Generation States and State Transition Methods + +The lifecycle of a generation is managed by a **state machine**. Each state represents a step in the generation process, and transitions between states are strictly controlled to ensure correct workflow execution. + +### Possible States + +The main states (defined by the `GENERATION_PROGRESS` enum) include: + +- **NOT_STARTED**: Initial state before any work begins. +- **PRELOADING**: Resources are being loaded/prepared. For inference, this would be the model loading phase. +- **PRELOADING_COMPLETE**: Preloading has finished, and the generation is ready to start. +- **GENERATING**: The actual generation (e.g., inference) is in progress. +- **PENDING_POST_PROCESSING**: Waiting to start post-processing. (if applicable) +- **POST_PROCESSING**: Post-processing is underway. (if applicable) +- **PENDING_SAFETY_CHECK**: Awaiting safety check. (if applicable) +- **SAFETY_CHECKING**: Safety check is in progress. (if applicable) +- **PENDING_SUBMIT**: Ready to submit results. 
(if applicable) +- **SUBMITTING**: Submission is in progress. (if applicable) +- **SUBMIT_COMPLETE**: Submission finished. (if applicable) +- **COMPLETE**: Generation is fully complete. +- **ERROR**: An error has occurred. +- **ABORTED**: The generation was aborted. +- **USER_REQUESTED_ABORT**: User requested an abort. +- **USER_ABORT_COMPLETE**: User abort process is complete. +- **ABANDONED**: The generation was abandoned. + +### State Transition Methods + +Each state has a corresponding method in `HordeSingleGeneration` to transition to it. These methods enforce the correct order and validate transitions: + +- `on_preloading()` → `PRELOADING` +- `on_preloading_complete()` → `PRELOADING_COMPLETE` +- `on_generating()` → `GENERATING` +- `on_generation_work_complete()` → `PENDING_POST_PROCESSING` or next logical state +- `on_post_processing()` → `POST_PROCESSING` +- `on_post_processing_complete()` → next logical state +- `on_pending_safety_check()` → `PENDING_SAFETY_CHECK` +- `on_safety_checking()` → `SAFETY_CHECKING` +- `on_safety_check_complete(batch_index, safety_result)` → advances after safety check +- `on_pending_submit()` → `PENDING_SUBMIT` +- `on_submitting()` → `SUBMITTING` +- `on_submit_complete()` → `SUBMIT_COMPLETE` +- `on_complete()` → `COMPLETE` +- `on_error(failed_message, failure_exception)` → `ERROR` +- `on_abort(failed_message, failure_exception)` → `ABORTED` +- `on_user_requested_abort()` → `USER_REQUESTED_ABORT` +- `on_user_abort_complete()` → `USER_ABORT_COMPLETE` + +These functions return the transitioned to state. + +#### Important Notes on State Transitions + +- **Strict Order:** You must call state transition methods in the correct sequence. For example, you cannot call `on_generating()` before `on_preloading_complete()`. This strictness helps catch workflow errors early. Out-of-order calls almost always indicate a problem in your logic or resource handling. +- **Error Handling:** If an error occurs at any step, use `on_error(failed_message="...")` to move the generation into the `ERROR` state. This lets you handle errors gracefully and, if needed, recover. To continue after an error, you must return to the most recent valid state before proceeding. + - For example, if you are in the `GENERATING` state and an error occurs, call `on_error()` to enter `ERROR`, then call `on_generating()` again to retry. +- **Dynamic States:** Some transition methods, such as `on_generation_work_complete()`, may lead to different next states depending on context (e.g., whether post-processing is required). The class handles these decisions internally, so you don’t need to manually select the next state. +- **Manual States:** You can use the generic `step(state)` or `on_state(state)` methods to transition to a specific state. However, you are responsible for ensuring the transition is valid—these methods bypass some of the built-in checks. + - **Black-box Mode:** If enabled, most validation checks are skipped. This is useful for testing or when the backend provides limited observability (for example, if inference and post-processing without clear signals of the transition). In black-box mode, you can jump directly between states (e.g., from `GENERATING` to `SAFETY_CHECKING`) without following the usual sequence, but this is not recommended unless you fully understand the implications. 
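+
+As a concrete illustration of the error-recovery rules above, the sketch below retries the generating step a limited number of times before aborting. This is a minimal, hypothetical sketch: `run_backend_inference()` stands in for whatever backend call your worker actually makes, and the retry budget is assumed to stay within the error limits you configured for the generation.
+
+```python
+def generate_with_retries(generation, max_attempts: int = 3) -> bool:
+    """Attempt the GENERATING step, recovering from errors as described above."""
+    for attempt in range(max_attempts):
+        generation.on_generating()
+        try:
+            result = run_backend_inference()  # hypothetical backend call
+        except Exception as exc:
+            # Enter ERROR, then loop back so the next pass transitions to GENERATING again.
+            generation.on_error(failed_message=f"Attempt {attempt + 1} failed", failure_exception=exc)
+            continue
+        generation.set_work_result(result)
+        generation.on_generation_work_complete()
+        return True
+    # Retries exhausted; abort so the failure can be reported.
+    generation.on_abort(failed_message="Generation failed after retries", failure_exception=RuntimeError("retries exhausted"))
+    return False
+```
+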
+ +### Example: Typical State Sequence + +A typical workflow might look like: + +```python +generation.on_preloading() +generation.on_preloading_complete() +generation.on_generating() +generation.set_work_result(result) +generation.on_generation_work_complete() +generation.on_safety_checking() +generation.on_safety_check_complete(batch_index=0, safety_result=safety_result) +generation.on_submitting() +generation.on_submit_complete() +``` + +If an error occurs at any step, use: + +```python +generation.on_error(failed_message="Something went wrong") +``` + +## Example Using `ImageSingleGeneration` for Image Generation Workflows + +The `ImageSingleGeneration` class in horde_sdk models the lifecycle of an image generation job. It enforces a strict state machine, so you must call its methods in the correct order for a successful workflow. + +### 1. Prepare Generation Parameters + +Start by constructing the necessary generation parameters, including the prompt and any upscaling/post-processing options. + +````python +from uuid import uuid4 +from horde_sdk.generation_parameters.image import BasicImageGenerationParameters, ImageGenerationParameters +from horde_sdk.generation_parameters.alchemy import AlchemyParameters, UpscaleAlchemyParameters +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_UPSCALERS + +prompt = "A beautiful landscape with mountains and a river" +result_id = str(uuid4()) + +generation_params = ImageGenerationParameters( + result_ids=[result_id], + batch_size=1, + base_params=BasicImageGenerationParameters(prompt=prompt), + alchemy_params=AlchemyParameters( + upscalers=[ + UpscaleAlchemyParameters( + result_id=result_id, + source_image=b"dummy_image_bytes", + upscaler=KNOWN_UPSCALERS.RealESRGAN_x4plus, + ) + ], + ), +) +```` + +### 2. Create the Generation Object + +Instantiate `ImageSingleGeneration` with your parameters and safety rules. + +````python +from horde_sdk.safety import SafetyRules +from horde_sdk.worker.generations import ImageSingleGeneration + +generation = ImageSingleGeneration( + generation_parameters=generation_params, + generation_id=str(uuid4()), + safety_rules=SafetyRules(should_censor_nsfw=True), +) +```` + +### 3. State Transition Sequence + +The generation object expects you to follow a specific sequence of method calls. Here’s the typical order: + +#### a. Preloading (Optional) + +If your workflow requires resource preloading, signal this: + +````python +generation.on_preloading() +# ...load resources... +generation.on_preloading_complete() +```` + +#### b. Start Generation + +Signal the start of generation, set the result, and mark work complete: + +````python +generation.on_generating() +# ...call backend/model... +generation.set_work_result(result=b"simulated_image_bytes") +generation.on_generation_work_complete() +```` + +#### c. Safety Checking + +Transition to safety checking and record the result: + +````python +from horde_sdk.safety import SafetyResult +import random + +generation.on_safety_checking() +safety_result = SafetyResult(is_csam=False, is_nsfw=random.choice([True, False])) +generation.on_safety_check_complete(batch_index=0, safety_result=safety_result) +```` + +### d. Submitting Results + +Finalize the workflow by submitting the result: + +````python +generation.on_submitting() +# ...submit to API/storage... +generation.on_submit_complete() +```` + +### 4. 
Handling Results + +After submission, you can inspect the safety results and handle accordingly: + +````python +if safety_result.is_nsfw: + print("Image flagged as NSFW.") +else: + print("Image passed safety check.") +```` + +### 5. Error Handling + +If a step fails, use `generation.on_error(failed_message="...")` to signal an error. The state machine will allow a limited number of recoveries before aborting. + +### 6. Full Example + +Here’s a minimal, complete workflow: + +````python +""" +Minimal example: Using horde_sdk to generate an image and perform a safety check. +Demonstrates the required state transitions for a valid workflow. +""" + +import time +import random +from uuid import uuid4 + +from horde_sdk.generation_parameters.image import BasicImageGenerationParameters, ImageGenerationParameters +from horde_sdk.generation_parameters.alchemy import AlchemyParameters, UpscaleAlchemyParameters +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_UPSCALERS +from horde_sdk.safety import SafetyResult, SafetyRules +from horde_sdk.worker.generations import ImageSingleGeneration + + +def main() -> None: + # Step 1: Prepare generation parameters + prompt = "A beautiful landscape with mountains and a river" + result_id = str(uuid4()) + generation_params = ImageGenerationParameters( + result_ids=[result_id], + batch_size=1, + base_params=BasicImageGenerationParameters(prompt=prompt), + alchemy_params=AlchemyParameters( + upscalers=[ + UpscaleAlchemyParameters( + result_id=result_id, + source_image=b"dummy_image_bytes", + upscaler=KNOWN_UPSCALERS.RealESRGAN_x4plus, + ) + ], + ), + ) + + # Step 2: Create a generation object + generation = ImageSingleGeneration( + generation_parameters=generation_params, + generation_id=str(uuid4()), + safety_rules=SafetyRules(should_censor_nsfw=True), + ) + + # Step 3: Preloading (optional, but shown in advanced example) + print("Preloading resources...") + generation.on_preloading() + time.sleep(0.5) + generation.on_preloading_complete() + + # Step 4: Start generation + print("Generating image...") + generation.on_generating() + time.sleep(1) + generation.on_generation_work_complete() + # Step 4.1: Start post-processing + generation.on_post_processing() + time.sleep(0.5) + generation.on_post_processing_complete() + generation.set_work_result(result=b"simulated_image_bytes") + + # Step 5: Safety check + print("Performing safety check...") + generation.on_safety_checking() + time.sleep(1) + safety_result = SafetyResult(is_csam=False, is_nsfw=random.choice([True, False])) + generation.on_safety_check_complete(batch_index=0, safety_result=safety_result) + + # Step 6: Submitting result + print("Submitting result...") + generation.on_submitting() + time.sleep(0.5) + generation.on_submit_complete() + + # Step 7: Handle result + if safety_result.is_nsfw: + print("Image flagged as NSFW.") + else: + print("Image passed safety check.") + + print("Done.") + + +if __name__ == "__main__": + main() + +```` + +--- + +### Key Points + +- **State transitions are required**; skipping steps will raise errors. +- **Batch generation**: For multiple images, repeat safety check and submission for each batch index. +- **Error recovery**: Use `on_error()` if a step fails, but note the allowed number of recoveries. + +For more advanced workflows (concurrency, post-processing, error handling), see the full example in `image_generation_advanced.py`. 
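+
+The batch generation point above can be sketched as follows. This is a simplified outline in the spirit of the advanced example; `run_safety_check()` and `submit_result()` are hypothetical placeholders for your own safety checker and submission logic:
+
+````python
+generation.on_safety_checking()
+for idx, generation_result in enumerate(generation.generation_results.values()):
+    if generation_result is None:
+        continue  # nothing to check for this index (e.g. it failed or was censored)
+    safety_result = run_safety_check(generation_result)  # hypothetical checker
+    generation.on_safety_check_complete(batch_index=idx, safety_result=safety_result)
+
+generation.on_submitting()
+for idx, generation_result in enumerate(generation.generation_results.values()):
+    if generation_result is not None:
+        submit_result(generation_result)  # hypothetical submission call
+generation.on_submit_complete()
+````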
+ +--- + +This guide should help you get started with the basic usage and required method calls for the generation class in horde_sdk. + + +## Visualizing Worker States Flow + +### Typical States Flow + +This is visual depiction of the `base_generate_progress_transitions` map found in `horde_sdk/ai_horde_worker/consts.py`. + +You should also see the [worker loop](../haidra-assets/docs/worker_loop.md) and [job lifecycle explanation](../haidra-assets/docs/workers.md) for additional details. + +```mermaid +--- +title: Worker States Flow (without error states) +--- +flowchart TD + + NOT_STARTED@{shape: subproc} + PRELOADING@{shape: subproc} + PRELOADING_COMPLETE@{shape: subproc} + PENDING_POST_PROCESSING@{ shape: subproc} + POST_PROCESSING@{ shape: subproc} + GENERATING@{ shape: subproc} + PENDING_SAFETY_CHECK@{ shape: subproc} + SAFETY_CHECKING@{ shape: subproc} + PENDING_SUBMIT@{ shape: subproc} + SUBMITTING@{ shape: subproc} + SUBMIT_COMPLETE@{ shape: subproc} + COMPLETE@{ shape: subproc} + + NOT_STARTED --> D1 + D1@{label: "Is preloading required?" } + D2@{label: "Is generation required?" } + D3@{label: "Is post-processing required?" } + D1 -->|Yes| PRELOADING + D1 -->|No| D2 + D2 -->|Yes| GENERATING + D2 -->|No| D3 + D3 -->|Yes| PENDING_POST_PROCESSING + D3 -->|No| PENDING_SAFETY_CHECK + + + PRELOADING --> PRELOADING_COMPLETE + + PRELOADING_COMPLETE --> D2 + + GENERATING --> D3 + + PENDING_POST_PROCESSING--> POST_PROCESSING + + POST_PROCESSING --> D11@{label: "Is safety check required?" } + D11 -->|Yes| PENDING_SAFETY_CHECK + D11 -->|No| D12@{label: "Is submit required?" } + + PENDING_SAFETY_CHECK --> SAFETY_CHECKING + + SAFETY_CHECKING --> D12 + D12 -->|Yes| PENDING_SUBMIT + D12 -->|No| COMPLETE + + PENDING_SUBMIT --> SUBMITTING + + SUBMITTING --> SUBMIT_COMPLETE + + SUBMIT_COMPLETE --> COMPLETE + + linkStyle 1 stroke:#27ae60,stroke-width:3px,color:#27ae60 + linkStyle 2 stroke:#e74c3c,stroke-width:3px + linkStyle 3 stroke:#27ae60,stroke-width:3px,color:#27ae60 + linkStyle 4 stroke:#e74c3c,stroke-width:3px + linkStyle 5 stroke:#27ae60,stroke-width:3px,color:#27ae60 + linkStyle 6 stroke:#e74c3c,stroke-width:3px + linkStyle 12 stroke:#27ae60,stroke-width:3px,color:#27ae60 + linkStyle 13 stroke:#e74c3c,stroke-width:3px + linkStyle 16 stroke:#27ae60,stroke-width:3px,color:#27ae60 + linkStyle 17 stroke:#e74c3c,stroke-width:3px + +``` + +--- + +```mermaid +--- +title: Worker Error States Flow +--- + +flowchart TD + style ERROR stroke-dasharray: 5 5 + style ABORTED stroke-dasharray: 5 5 + style USER_REQUESTED_ABORT stroke-dasharray: 5 5 + + ERROR["`Error + (_valid from all states_)`"] --> ABORTED + ERROR --> ABANDONED + + ABORTED["`Aborted + (_valid from all states_)`"] --> REPORTED_FAILED + ABORTED --> ERROR + + USER_REQUESTED_ABORT["`User Requested Abort + (_valid from all states_)`"] --> USER_ABORT_COMPLETE + + REPORTED_FAILED + + USER_ABORT_COMPLETE + + ABANDONED + +``` + +--- + +`ERROR`, `ABORTED` and `USER_REQUESTED_ABORT` states are always valid to transition to. If transitioning to `ERROR`, it is **only** permissible to transition to the state from which the error occurred, or to `ABORTED`. If transitioning to `ABORTED`, it is only permissible to transition to `REPORTED_FAILED` or `USER_REQUESTED_ABORT`. + +Consider the following good and bad examples of error transitions: + +Good: + +- `NOT_STARTED` -> `PRELOADING` -> `ERROR` -> `PRELOADING` -> `PRELOADING_COMPLETE` -> ... + - In this case, the error occurred during preloading, and the worker was able to recover and continue. 
+- `NOT_STARTED` -> `PRELOADING` -> `ERROR` -> `PRELOADING` -> `ERROR` -> `ABORTED` -> `REPORTED_FAILED` + - Here, the worker encountered an error during preloading, attempted to recover, but failed again and then aborted the job. Note that you can set the intended number of retries in worker job configuration. See the `HordeWorkerJobConfig` class and the `state_error_limits` arg in a generation class constructor for more details. +- `NOT_STARTED` -> `PRELOADING` -> `USER_REQUESTED_ABORT` -> `USER_ABORT_COMPLETE` + - In this case, the user who created the job requested an abort, and the worker was able to complete the abort process successfully. + +Bad: + +- `NOT_STARTED` -> `PRELOADING` -> `ERROR` -> `GENERATING` + - If an error occurs, you have to explicitly handle it and you must transition *back* to the state from which the error occurred, or to `ABORTED`. In this case, the worker is trying to continue generating after an error occurred during preloading, which is not allowed. The correct transition would be to go back to `PRELOADING` or to `ABORTED`. +- `NOT_STARTED` -> `PRELOADING` -> `ERROR` -> `ERROR` + - This is not allowed, as you cannot transition to `ERROR` from `ERROR`. You must handle the error and transition to a valid state, such as `ABORTED` or back to the state from which the error occurred. If this situation occurs to you frequently, you will need to review your flow and control to ensure that errors and exceptions are handled properly. Consider checking the current state before transitioning to `ERROR` and if it is already `ERROR` consider logging the error and aborting the job instead. + +### Black Box States Flow + +Depending on the worker backend, it may not always be possible to track all of the states. For example, it may be that the backend silently handles `PRELOADING` without a callback or hook to detect that it has started or completed. Further, some backends may ever only support a single model, so `PRELOADING` may not be applicable at all. In these cases, it is appropriate to use `black_box_mode` for these `HordeSingleGeneration` class instances. + +In this case, the flow is simplified to the following (where safety checks, even if required, are also an optional state) + +--- + +```mermaid +--- +title: Black Box States (not including error states) +--- +flowchart TD + NOT_STARTED --> GENERATING + + GENERATING --> PENDING_SUBMIT + GENERATING --> PENDING_SAFETY_CHECK + GENERATING --> COMPLETE + + PENDING_SAFETY_CHECK --> SAFETY_CHECKING + + SAFETY_CHECKING --> PENDING_SUBMIT + SAFETY_CHECKING --> COMPLETE + + PENDING_SUBMIT --> SUBMITTING + + SUBMITTING --> SUBMIT_COMPLETE + SUBMITTING --> COMPLETE + + SUBMIT_COMPLETE --> COMPLETE +``` + +--- + +```mermaid +--- +title: Black Box Error States +--- + +flowchart TD + style ERROR stroke-dasharray: 5 5 + style ABORTED stroke-dasharray: 5 5 + style USER_REQUESTED_ABORT stroke-dasharray: 5 5 + + ERROR["`Error + (_valid from all states_)`"] --> ABORTED + ERROR --> ABANDONED + + ABORTED --> REPORTED_FAILED + ABORTED --> ABANDONED + + USER_REQUESTED_ABORT["`User Requested Abort + (_valid from all states_)`"] --> USER_ABORT_COMPLETE + USER_REQUESTED_ABORT --> ABANDONED + + USER_ABORT_COMPLETE + + REPORTED_FAILED + + ABANDONED +``` + +--- + +Note that a generation may still require additional steps, such as post-processing or safety checking, but it is assumed that these steps are handled internally by the backend and do not require explicit state transitions in the worker. 
The worker will still report the final state as `COMPLETE` or `FAILED` based on the outcome of the generation. It is incumbent on the implementor to ensure that these steps have happened as intended. diff --git a/examples/ai_horde_client/find_user.py b/examples/ai_horde_client/find_user.py index 84cc011f..7046f393 100644 --- a/examples/ai_horde_client/find_user.py +++ b/examples/ai_horde_client/find_user.py @@ -3,7 +3,7 @@ from horde_sdk import RequestErrorResponse from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIManualClient from horde_sdk.ai_horde_api.apimodels import FindUserRequest -from horde_sdk.ai_horde_api.apimodels._users import UserDetailsResponse +from horde_sdk.ai_horde_api.apimodels.users import UserDetailsResponse def find_user_example( diff --git a/examples/ai_horde_client/image/alchemy_example.py b/examples/ai_horde_client/image/alchemy_example.py index b74a86b4..5bd71dde 100644 --- a/examples/ai_horde_client/image/alchemy_example.py +++ b/examples/ai_horde_client/image/alchemy_example.py @@ -10,11 +10,11 @@ from horde_sdk import ANON_API_KEY, RequestErrorResponse from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIAsyncClientSession, AIHordeAPIAsyncSimpleClient from horde_sdk.ai_horde_api.apimodels import ( - KNOWN_ALCHEMY_TYPES, AlchemyAsyncRequest, AlchemyAsyncRequestFormItem, AlchemyStatusResponse, ) +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES async def async_alchemy_example( @@ -35,7 +35,7 @@ async def async_alchemy_example( source_image_base64: str = base64.b64encode(input_image_path.read_bytes()).decode() status_response: AlchemyStatusResponse | RequestErrorResponse - status_response, job_id = await simple_client.alchemy_request( + status_response, gen_id = await simple_client.alchemy_request( AlchemyAsyncRequest( apikey=apikey, forms=[ @@ -57,9 +57,9 @@ async def async_alchemy_example( example_path = Path("requested_images") example_path.mkdir(exist_ok=True, parents=True) - upscale_result_image.save(example_path / f"{job_id}_{upscale_result.upscaler_used}.webp") + upscale_result_image.save(example_path / f"{gen_id}_{upscale_result.upscaler_used}.webp") - logger.info(f"Upscaled image saved to {example_path / f'{job_id}_{upscale_result.upscaler_used}.webp'}") + logger.info(f"Upscaled image saved to {example_path / f'{gen_id}_{upscale_result.upscaler_used}.webp'}") if __name__ == "__main__": diff --git a/examples/ai_horde_client/image/async_manual_client_example.py b/examples/ai_horde_client/image/async_manual_client_example.py index 237771c0..d2490444 100644 --- a/examples/ai_horde_client/image/async_manual_client_example.py +++ b/examples/ai_horde_client/image/async_manual_client_example.py @@ -50,7 +50,7 @@ async def main(apikey: str = ANON_API_KEY) -> None: check_counter += 1 check_response = await manual_client.get_generate_check( - job_id=response.id_, + gen_id=response.id_, ) if isinstance(check_response, RequestErrorResponse): @@ -113,6 +113,7 @@ async def main(apikey: str = ANON_API_KEY) -> None: parser.add_argument( "--apikey", "--api-key", + "--api_key", "-k", type=str, default=ANON_API_KEY, diff --git a/examples/ai_horde_client/image/async_simple_client_example.py b/examples/ai_horde_client/image/async_simple_client_example.py index 3cc973b8..ed6d09af 100644 --- a/examples/ai_horde_client/image/async_simple_client_example.py +++ b/examples/ai_horde_client/image/async_simple_client_example.py @@ -17,7 +17,7 @@ ImageGenerationInputPayload, TIPayloadEntry, ) -from horde_sdk.ai_horde_api.fields import 
JobID +from horde_sdk.ai_horde_api.fields import GenerationID def save_image_and_json( @@ -40,9 +40,9 @@ async def async_one_image_generate_example( apikey: str = ANON_API_KEY, ) -> None: single_generation_response: ImageGenerateStatusResponse - job_id: JobID + gen_id: GenerationID - single_generation_response, job_id = await simple_client.image_generate_request( + single_generation_response, gen_id = await simple_client.image_generate_request( ImageGenerateAsyncRequest( apikey=apikey, prompt="A cat in a hat", @@ -68,15 +68,15 @@ async def async_one_image_generate_example( example_path = Path("requested_images") example_path.mkdir(exist_ok=True, parents=True) - download_image_tasks: list[asyncio.Task[tuple[Image, JobID]]] = [] + download_image_tasks: list[asyncio.Task[tuple[Image, GenerationID]]] = [] for generation in single_generation_response.generations: download_image_tasks.append(asyncio.create_task(simple_client.download_image_from_generation(generation))) - downloaded_images: list[tuple[Image, JobID]] = await asyncio.gather(*download_image_tasks) + downloaded_images: list[tuple[Image, GenerationID]] = await asyncio.gather(*download_image_tasks) - for image, job_id in downloaded_images: - filename_base = f"{job_id}_simple_async_example" + for image, gen_id in downloaded_images: + filename_base = f"{gen_id}_simple_async_example" save_image_and_json(image, generation, example_path, filename_base) @@ -85,8 +85,8 @@ async def async_multi_image_generate_example( apikey: str = ANON_API_KEY, ) -> None: multi_generation_responses: tuple[ - tuple[ImageGenerateStatusResponse, JobID], - tuple[ImageGenerateStatusResponse, JobID], + tuple[ImageGenerateStatusResponse, GenerationID], + tuple[ImageGenerateStatusResponse, GenerationID], ] multi_generation_responses = await asyncio.gather( simple_client.image_generate_request( @@ -115,9 +115,9 @@ async def async_multi_image_generate_example( ), ) - download_image_tasks: list[asyncio.Task[tuple[Image, JobID]]] = [] + download_image_tasks: list[asyncio.Task[tuple[Image, GenerationID]]] = [] - for generation_response, _job_id in multi_generation_responses: + for generation_response, _gen_id in multi_generation_responses: if isinstance(generation_response, RequestErrorResponse): logger.error(f"Error: {generation_response.message}") else: @@ -129,10 +129,10 @@ async def async_multi_image_generate_example( asyncio.create_task(simple_client.download_image_from_generation(generation)), ) - downloaded_images: list[tuple[Image, JobID]] = await asyncio.gather(*download_image_tasks) + downloaded_images: list[tuple[Image, GenerationID]] = await asyncio.gather(*download_image_tasks) - for image, job_id in downloaded_images: - filename_base = f"{job_id}_simple_async_example" + for image, gen_id in downloaded_images: + filename_base = f"{gen_id}_simple_async_example" save_image_and_json(image, generation, example_path, filename_base) @@ -155,6 +155,7 @@ async def async_simple_generate_example(apikey: str = ANON_API_KEY) -> None: parser.add_argument( "--apikey", "--api-key", + "--api_key", "-k", type=str, default=ANON_API_KEY, diff --git a/examples/ai_horde_client/image/dry_run_example.py b/examples/ai_horde_client/image/dry_run_example.py index e9b0bad0..afa4cb20 100644 --- a/examples/ai_horde_client/image/dry_run_example.py +++ b/examples/ai_horde_client/image/dry_run_example.py @@ -1,7 +1,7 @@ from horde_sdk import ANON_API_KEY from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPISimpleClient from horde_sdk.ai_horde_api.apimodels import 
ImageGenerateAsyncRequest, ImageGenerationInputPayload -from horde_sdk.ai_horde_api.consts import KNOWN_SAMPLERS +from horde_sdk.generation_parameters.image.consts import KNOWN_IMAGE_SAMPLERS def simple_generate_example() -> None: @@ -12,7 +12,7 @@ def simple_generate_example() -> None: apikey=ANON_API_KEY, prompt="A cat in a hat", params=ImageGenerationInputPayload( - sampler_name=KNOWN_SAMPLERS.k_heun, + sampler_name=KNOWN_IMAGE_SAMPLERS.k_heun, denoising_strength=0.4, seed="42", height=832, diff --git a/examples/ai_horde_client/image/manual_client_example.py b/examples/ai_horde_client/image/manual_client_example.py index 2367a07b..4ac8be29 100644 --- a/examples/ai_horde_client/image/manual_client_example.py +++ b/examples/ai_horde_client/image/manual_client_example.py @@ -59,7 +59,7 @@ def manual_image_generation(apikey: str = ANON_API_KEY) -> None: check_counter += 1 check_response: ImageGenerateCheckResponse | RequestErrorResponse check_response = manual_client.get_generate_check( - job_id=response.id_, + gen_id=response.id_, ) # The above is short hand for: diff --git a/examples/ai_horde_client/image/simple_client_example.py b/examples/ai_horde_client/image/simple_client_example.py index f9526151..a72a24b2 100644 --- a/examples/ai_horde_client/image/simple_client_example.py +++ b/examples/ai_horde_client/image/simple_client_example.py @@ -5,8 +5,8 @@ from PIL.Image import Image from horde_sdk import ANON_API_KEY -from horde_sdk.ai_horde_api import KNOWN_SAMPLERS from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPISimpleClient +from horde_sdk.generation_parameters.image.consts import KNOWN_IMAGE_SAMPLERS # isort: off from horde_sdk.ai_horde_api.apimodels import ( @@ -20,14 +20,29 @@ # isort: on +def save_image_and_json( + image: Image, + generation: ImageGeneration, + example_path: Path, + filename_base: str, +) -> None: + image.save(example_path / f"{filename_base}.webp") + logger.info(f"Image saved to {example_path / f'{filename_base}.webp'}") + + with open(example_path / f"{filename_base}.json", "w") as f: + f.write(generation.model_dump_json(indent=4)) + + logger.info(f"Response JSON saved to {example_path / f'{filename_base}.json'}") + + def simple_generate_example(api_key: str = ANON_API_KEY) -> None: simple_client = AIHordeAPISimpleClient() - status_response, job_id = simple_client.image_generate_request( + status_response, gen_id = simple_client.image_generate_request( ImageGenerateAsyncRequest( apikey=api_key, params=ImageGenerationInputPayload( - sampler_name=KNOWN_SAMPLERS.k_euler, + sampler_name=KNOWN_IMAGE_SAMPLERS.k_euler, cfg_scale=4, width=512, height=512, @@ -67,21 +82,7 @@ def simple_generate_example(api_key: str = ANON_API_KEY) -> None: f"{status_response.kudos} kudos were spent on this request for {len(status_response.generations)} images.", ) - def save_image_and_json( - image: Image, - generation: ImageGeneration, - example_path: Path, - filename_base: str, - ) -> None: - image.save(example_path / f"{filename_base}.webp") - logger.info(f"Image saved to {example_path / f'{filename_base}.webp'}") - - with open(example_path / f"{filename_base}.json", "w") as f: - f.write(generation.model_dump_json(indent=4)) - - logger.info(f"Response JSON saved to {example_path / f'{filename_base}.json'}") - - filename_base = f"{job_id}_simple_sync_example" + filename_base = f"{gen_id}_simple_sync_example" for generation in status_response.generations: logger.info("Image generation:") diff --git a/examples/ai_horde_client/text/async_text_generate.py 
b/examples/ai_horde_client/text/async_text_generate.py index 16e0ab31..622f2d8c 100644 --- a/examples/ai_horde_client/text/async_text_generate.py +++ b/examples/ai_horde_client/text/async_text_generate.py @@ -15,7 +15,7 @@ TextGenerateStatusResponse, ) from horde_sdk.ai_horde_api.exceptions import AIHordeRequestError -from horde_sdk.ai_horde_api.fields import JobID +from horde_sdk.ai_horde_api.fields import GenerationID def check_callback(response: TextGenerateStatusResponse) -> None: @@ -29,10 +29,10 @@ async def async_text_generate_example( apikey: str = ANON_API_KEY, ) -> None: status_response: TextGenerateStatusResponse - job_id: JobID + gen_id: GenerationID try: - status_response, job_id = await simple_client.text_generate_request( + status_response, gen_id = await simple_client.text_generate_request( TextGenerateAsyncRequest( apikey=apikey, prompt="Hello, world!", @@ -77,7 +77,7 @@ async def async_text_generate_example( if len(status_response.generations) == 0: raise ValueError("No generations returned in the response.") - logger.debug(f"Job ID: {job_id}") + logger.debug(f"Job ID: {gen_id}") logger.debug(f"Response: {status_response}") text_generated = status_response.generations[0].text @@ -87,10 +87,10 @@ async def async_text_generate_example( example_path = Path("requested_text") example_path.mkdir(exist_ok=True, parents=True) - async with aiofiles.open(example_path / f"{job_id}_async_example.txt", "w") as f: + async with aiofiles.open(example_path / f"{gen_id}_async_example.txt", "w") as f: await f.write(status_response.model_dump_json(indent=4)) - logger.info(f"Wrote full response JSON to {example_path / f'{job_id}_async_example.txt'}") + logger.info(f"Wrote full response JSON to {example_path / f'{gen_id}_async_example.txt'}") async def main(apikey: str) -> None: diff --git a/examples/ai_horde_client/text/text_generate.py b/examples/ai_horde_client/text/text_generate.py index 13919608..9c78eedf 100644 --- a/examples/ai_horde_client/text/text_generate.py +++ b/examples/ai_horde_client/text/text_generate.py @@ -4,7 +4,7 @@ from loguru import logger from horde_sdk import ANON_API_KEY -from horde_sdk.ai_horde_api import JobID +from horde_sdk.ai_horde_api import GenerationID from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPISimpleClient from horde_sdk.ai_horde_api.apimodels import ( ModelGenerationInputKobold, @@ -23,9 +23,9 @@ def simple_generate_example(api_key: str = ANON_API_KEY) -> None: simple_client = AIHordeAPISimpleClient() status_response: TextGenerateStatusResponse - job_id: JobID + gen_id: GenerationID - status_response, job_id = simple_client.text_generate_request( + status_response, gen_id = simple_client.text_generate_request( TextGenerateAsyncRequest( apikey=api_key, prompt="Hello, world!", @@ -66,7 +66,7 @@ def simple_generate_example(api_key: str = ANON_API_KEY) -> None: if len(status_response.generations) == 0: raise ValueError("No generations returned in the response.") - logger.debug(f"Job ID: {job_id}") + logger.debug(f"Job ID: {gen_id}") logger.debug(f"Response: {status_response}") text_generated = status_response.generations[0].text @@ -76,7 +76,7 @@ def simple_generate_example(api_key: str = ANON_API_KEY) -> None: example_path = Path("requested_text") example_path.mkdir(exist_ok=True, parents=True) - with open(example_path / f"{job_id}_simple_sync_example.txt", "w") as f: + with open(example_path / f"{gen_id}_simple_sync_example.txt", "w") as f: f.write(status_response.model_dump_json(indent=4)) diff --git a/examples/worker/README.md 
b/examples/worker/README.md new file mode 100644 index 00000000..d0d4103d --- /dev/null +++ b/examples/worker/README.md @@ -0,0 +1,3 @@ +# Worker Examples + +Contains examples of worker-side logic implementing the SDK. diff --git a/examples/worker/generations/image_generation_advanced.py b/examples/worker/generations/image_generation_advanced.py new file mode 100644 index 00000000..fa606806 --- /dev/null +++ b/examples/worker/generations/image_generation_advanced.py @@ -0,0 +1,454 @@ +""" +This example demonstrates an asynchronous image generation workflow using threading and semaphores. + +It simulates the lifecycle of a worker, including preloading resources, generating images, +post-processing, safety checks, and submission. The example also shows how to handle errors +and manage concurrent generation processes. + +This example does not include interactions with the Horde API or any real image generation logic. + +For more details on the worker lifecycle, refer to: +- Worker Loop: docs/haidra-assets/docs/worker_loop.md +- Worker States Flow: docs/ai-horde-worker/worker_states_flow.md +""" + +import os +import random +import time +from collections.abc import Callable +from concurrent.futures import ThreadPoolExecutor, as_completed +from threading import Semaphore +from uuid import uuid4 + +from loguru import logger + +# You could set this yourself using loguru. See their documentation for more details. +os.environ["HORDE_SDK_LOG_VERBOSITY"] = "10" + +from horde_sdk.generation_parameters.alchemy import AlchemyParameters, UpscaleAlchemyParameters +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_UPSCALERS +from horde_sdk.generation_parameters.image import BasicImageGenerationParameters, ImageGenerationParameters +from horde_sdk.safety import SafetyResult, SafetyRules +from horde_sdk.worker.consts import GENERATION_PROGRESS +from horde_sdk.worker.generations import ImageSingleGeneration + +# Semaphore to limit concurrent generation processes +# This value would depend on the system's capabilities and the expected load. +# Here, we set it to 3 to allow up to 3 concurrent image generations, which is likely not realistic, +# but helps to illustrate the concept. +# +# In a real-world scenario where you would like concurrency, you likely will need to survey your system's capabilities +# and set this value accordingly. +MAX_CONCURRENT_GENERATIONS = 3 +generation_semaphore = Semaphore(MAX_CONCURRENT_GENERATIONS) + + +def preload_resources(generation: ImageSingleGeneration) -> None: + """Simulates the preloading of resources required for image generation. + + Args: + generation (ImageSingleGeneration): The generation instance to preload resources for. + """ + generation.on_preloading() + logger.info(f"{generation.short_id} - Preloading resources for image generation") + time.sleep(2) # Simulate time taken for preloading + logger.info(f"{generation.short_id} - Preloading complete") + generation.on_preloading_complete() + + +def _simulated_image_backend_handoff( + generation: ImageSingleGeneration, + include_post_processing: bool, + status_callback: Callable[[GENERATION_PROGRESS], None], + should_fail: bool = False, +) -> bytes | list[bytes] | None: + """Simulates the handoff to the image generation backend. + + Args: + generation (ImageSingleGeneration): The generation instance to hand off to the backend. 
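+        include_post_processing (bool): Whether to simulate a post-processing step after inference.
+        status_callback (Callable[[GENERATION_PROGRESS], None]): Callback used to report simulated state changes.
+        should_fail (bool): If True, simulate a failure during the backend handoff and return None.
+
+    Returns:
+        bytes | list[bytes] | None: The simulated image bytes (a list when the batch size is greater
+            than 1), or None if the simulated handoff failed.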
+ """ + logger.info(f"{generation.short_id} - Handoff to image generation backend") + status_callback(GENERATION_PROGRESS.GENERATING) + + if should_fail: + logger.error(f"{generation.short_id} - Simulated failure during backend handoff") + return None + + time.sleep(random.choice([1, 5])) # Simulate time taken for backend to generation/do inference + status_callback(GENERATION_PROGRESS.GENERATION_COMPLETE) + + if include_post_processing: + status_callback(GENERATION_PROGRESS.POST_PROCESSING) + time.sleep(1) # Simulate time taken for post-processing + status_callback(GENERATION_PROGRESS.POST_PROCESSING_COMPLETE) + logger.info(f"{generation.short_id} - Handoff complete") + + # This should be the result of the image generation process. + # In a real-world scenario, this would be the bytes of the generated image. + # Here, we simulate it by encoding a string for demonstration purposes. + if generation.batch_size == 1: + return b"simulated_image_bytes" + + return [b"simulated_image_bytes" for _ in range(generation.batch_size)] + + +def generate_image(generation: ImageSingleGeneration, prompt: str, should_fail: bool = False) -> None: + with generation_semaphore: + while True: + # The generation object has internal limits to the number of ERROR recoveries it allows. + # By default, it allows 3 recoveries before it considers the generation failed. + # In the case it is failed, GENERATING will not be a valid next state. + # This method should be preferred over handling an exception from `on_generating`. + # + # You can override this by setting your `HordeWorkerJobConfig` appropriately. + # See the docs for more details. + if not generation.is_next_state_valid(GENERATION_PROGRESS.GENERATING): + logger.error(f"{generation.short_id} - Generation failed.") + break + + logger.info(f"{generation.short_id} - Attempting to generate image for prompt: {prompt}") + + def status_callback(progress: GENERATION_PROGRESS) -> None: + """Callback to update the generation progress status.""" + logger.info(f"{generation.short_id} - Generation progress: {progress.name}") + + if progress == GENERATION_PROGRESS.GENERATING: + generation.on_generating() + elif progress == GENERATION_PROGRESS.GENERATION_COMPLETE: + logger.info( + f"{generation.short_id} - Generation complete. " + f"requires_post_processing: {generation.requires_post_processing}", + ) + generation.on_generation_work_complete() + elif progress == GENERATION_PROGRESS.POST_PROCESSING: + generation.on_post_processing() + elif progress == GENERATION_PROGRESS.POST_PROCESSING_COMPLETE: + generation.on_post_processing_complete() + + # Simulate image generation process. This would be the call to the backend or a service + # which actually does the image generation. + # + # In a real-world scenario, this would involve sending the prompt to an API or a + # local model and receiving the generated image. + result = _simulated_image_backend_handoff( + generation, + include_post_processing=generation.requires_post_processing, + status_callback=status_callback, + should_fail=should_fail, + ) + + if result is None: + logger.error(f"{generation.short_id} - Image generation failed") + # It is important to signal the generation failure to the generation object. + # Errors must either recover to the most recent valid state or be aborted. + generation.on_error( + failed_message="Image generation failed during backend handoff.", + ) + continue + + # Set the final result(s) of the generation. 
This method supports both single and multiple results + # depending on the batch size of the generation. + # + # This value will be used in the next steps of the generation process. + # + # Notice that in this example, the `status_callback` function captured the state transitions + # so the current state is already set to GENERATION_COMPLETE or POST_PROCESSING_COMPLETE before this call. + generation.set_work_result(result=result) # This could happen after a delay if needed. + + logger.info(f"{generation.short_id} - Image generation complete") + break + + +def perform_safety_checks(generation: ImageSingleGeneration) -> None: + """Simulates safety checks on the generated image. + + Iterates over the generated images and performs safety checks on each one. If a result is flagged as NSFW, + it is logged and the generation is marked as such. + + Args: + generation (ImageSingleGeneration): The generation instance to perform safety checks on. + """ + generation.on_safety_checking() + logger.info(f"{generation.short_id} - Performing safety check on generated images") + for idx, generation_result in enumerate(generation.generation_results.values()): + if generation_result is not None: + time.sleep(2) # Simulate time taken for safety checks + safety_result = SafetyResult( + is_csam=False, + is_nsfw=random.choice([True, False]), + ) + logger.info(f"{generation.short_id} - Safety check complete for result index {idx}") + generation.on_safety_check_complete(batch_index=idx, safety_result=safety_result) + if safety_result.is_nsfw: + logger.warning(f"{generation.short_id} - Image flagged as NSFW for result index {idx}") + + +def submit_results(generation: ImageSingleGeneration) -> None: + """Simulates submission of the generated image. + + Args: + generation (ImageSingleGeneration): The generation instance to submit results for. + """ + generation.on_submitting() + for idx, generation_result in enumerate(generation.generation_results.values()): + if generation_result is not None: + logger.info(f"{generation.short_id} - Submitting generated image for result index {idx}") + time.sleep(1) # Simulate time taken for submission + logger.info(f"{generation.short_id} - Submission complete for result index {idx}") + else: + logger.warning( + f"{generation.short_id} - No generated image to submit for result index {idx}. Was it censored?", + ) + + generation.on_submit_complete() + logger.success(f"{generation.short_id} - Submission complete") + + +class GenerationResultDetail: + def __init__(self, result_index: int, status: str, nsfw_detail: SafetyResult | None) -> None: + self.result_index = result_index + self.status = status + self.nsfw_detail = nsfw_detail + + +class GenerationRunResult: + def __init__( + self, + generation_id: str, + short_id: str, + prompt: str, + status: str, + results: list[GenerationResultDetail], + ) -> None: + self.generation_id = generation_id + self.short_id = short_id + self.prompt = prompt + self.status = status + self.results = results + + +def generation_process(generation_params: ImageGenerationParameters, should_fail: bool) -> GenerationRunResult: + """Simulates the entire generation process for a single generation request. + + Args: + generation_params (ImageGenerationParameters): The parameters for the image generation. + Returns: + dict: Summary of the generation process. 
+ """ + generation_id = str(uuid4()) + logger.info(f"Generation ID: {generation_id}") + + generation = ImageSingleGeneration( + generation_parameters=generation_params, + generation_id=generation_id, + safety_rules=SafetyRules( + should_censor_nsfw=random.choice([True, False, False]), + ), + ) + + logger.info(f"{generation.short_id} - Starting generation process") + + preload_resources(generation) + + prompt = generation_params.base_params.prompt + generate_image(generation, prompt, should_fail=should_fail) + + # Abort if all generation attempts failed. + if all(generation_result is None for generation_result in generation.generation_results): + logger.error(f"{generation.short_id} - All generation attempts failed. Aborting.") + generation.on_abort(failed_message="All generation attempts failed.") + return GenerationRunResult( + generation_id=generation_id, + short_id=generation.short_id, + prompt=prompt, + status="failed", + results=[], + ) + + perform_safety_checks(generation) + submit_results(generation) + logger.info(f"{generation.short_id} - Generation process finalized") + + # Collect useful results for summary + results = [] + for idx, generation_result in enumerate(generation.generation_results.values()): + safety_check_results = generation.get_safety_check_results() + if generation_result is not None: + results.append( + GenerationResultDetail( + result_index=idx, + status="success", + nsfw_detail=(safety_check_results[idx] if safety_check_results else None), + ), + ) + else: + results.append( + GenerationResultDetail( + result_index=idx, + status="censored_or_failed", + nsfw_detail=safety_check_results[idx] if safety_check_results else None, + ), + ) + + return GenerationRunResult( + generation_id=generation_id, + short_id=generation.short_id, + prompt=prompt, + status="completed", + results=results, + ) + + +class GenerationTestCaseData: + prompt: str + result_ids: list[str] + include_post_processing: bool + should_fail_on_generation: bool + + def __init__( + self, + prompt: str, + result_ids: list[str], + include_post_processing: bool = False, + should_fail_on_generation: bool = False, + ) -> None: + """Initializes a test case for image generation. + + Args: + prompt (str): The prompt for the image generation. + result_ids (list[str]): The result IDs for the image generation. + include_post_processing (bool): Whether to include post-processing steps. + should_fail_on_generation (bool): Whether the generation should fail (for testing purposes). + """ + self.prompt = prompt + self.result_ids = result_ids + self.include_post_processing = include_post_processing + self.should_fail_on_generation = should_fail_on_generation + + +class GenerationTestCase: + def __init__( + self, + generation_parameters: ImageGenerationParameters, + should_fail_on_generation: bool = False, + ) -> None: + self.generation_parameters = generation_parameters + self.should_fail_on_generation = should_fail_on_generation + + +def get_generation_test_cases() -> list[GenerationTestCase]: + """Simulates fetching generation parameters for multiple image generations. + + Returns: + list[GenerationTestCase]: A list of test cases for different image generations. 
+ """ + + test_case_data = [ + GenerationTestCaseData( + prompt="A beautiful landscape with mountains and a river", + result_ids=[str(uuid4())], + include_post_processing=False, + should_fail_on_generation=False, + ), + GenerationTestCaseData( + prompt="A futuristic cityscape at night", + result_ids=[str(uuid4()), str(uuid4())], + include_post_processing=True, + should_fail_on_generation=False, + ), + GenerationTestCaseData( + prompt="A serene beach during sunset", + result_ids=[str(uuid4())], + include_post_processing=False, + should_fail_on_generation=True, + ), + GenerationTestCaseData( + prompt="A bustling market in a medieval town", + result_ids=[str(uuid4())], + include_post_processing=False, + should_fail_on_generation=False, + ), + GenerationTestCaseData( + prompt="A close-up of a flower in bloom", + result_ids=[str(uuid4())], + include_post_processing=False, + should_fail_on_generation=False, + ), + GenerationTestCaseData( + prompt="A majestic lion resting in the savannah", + result_ids=[str(uuid4()), str(uuid4()), str(uuid4()), str(uuid4())], + include_post_processing=False, + should_fail_on_generation=False, + ), + ] + + # In a typical scenario using the SDK to get jobs, you would not need to manually create these parameters. + # Instead, you would get job objects which would contain `ImageGenerationParameters` (or similar) as part of their + # data. + return [ + GenerationTestCase( + generation_parameters=ImageGenerationParameters( + result_ids=test_case_data.result_ids, + batch_size=len( + test_case_data.result_ids, + ), # This is the number of images to concurrently generate during one inference call. + base_params=BasicImageGenerationParameters( + prompt=test_case_data.prompt, + model="stable_diffusion", + ), + alchemy_params=AlchemyParameters( + upscalers=[ + UpscaleAlchemyParameters( + result_id=result_id, + source_image=b"dummy_image_bytes", + upscaler=KNOWN_UPSCALERS.RealESRGAN_x4plus, + ) + for result_id in test_case_data.result_ids + ], + ), + ), + should_fail_on_generation=test_case_data.should_fail_on_generation, + ) + for test_case_data in test_case_data + ] + + +def main() -> None: + """ + Entry point for the script. Simulates batch image generation with multiple prompts. + Prints a summary of each generation at the end. 
+ """ + test_cases = get_generation_test_cases() + results: list[GenerationRunResult] = [] + + with ThreadPoolExecutor(max_workers=MAX_CONCURRENT_GENERATIONS) as executor: + future_to_case = { + executor.submit( + generation_process, + test_case.generation_parameters, + test_case.should_fail_on_generation, + ): test_case + for test_case in test_cases + } + for future in as_completed(future_to_case): + result = future.result() + results.append(result) + + print("\n" + "=" * 30) + print(" Generation Summary") + print("=" * 30) + for res in results: + print(f"\nGeneration ID : {res.generation_id}") + print(f"Short ID : {res.short_id}") + print(f"Prompt : {res.prompt}") + print(f"Status : {res.status}") + print("Results:") + for detail in res.results: + print(f" - Result Index : {detail.result_index}") + print(f" Status : {detail.status}") + print(f" NSFW Detail : {detail.nsfw_detail}") + print("-" * 30) + + +if __name__ == "__main__": + main() diff --git a/examples/worker/generations/image_generation_basic.py b/examples/worker/generations/image_generation_basic.py new file mode 100644 index 00000000..ae042c1f --- /dev/null +++ b/examples/worker/generations/image_generation_basic.py @@ -0,0 +1,156 @@ +""" +This example demonstrates a more advanced image generation workflow using horde-sdk. + +It simulates the typical steps involved in generating images, including preloading resources, +generating images, performing safety checks, and submitting results. + +This example includes simulated errors and failed safety checks to demonstrate expectations +surrounding the generation process. + +""" + +import os +import random +import time +from uuid import uuid4 + +from loguru import logger + +# You could set this yourself using loguru. See their documentation for more details. 
+os.environ["HORDE_SDK_LOG_VERBOSITY"] = "10" + +from horde_sdk.generation_parameters.image import BasicImageGenerationParameters, ImageGenerationParameters +from horde_sdk.safety import SafetyResult, SafetyRules +from horde_sdk.worker.consts import GENERATION_PROGRESS +from horde_sdk.worker.generations import ImageSingleGeneration + + +def do_preload(generation: ImageSingleGeneration) -> None: + generation.on_preloading() + logger.info(f"{generation.generation_id} - Preloading resources for image generation") + time.sleep(2) + logger.info(f"{generation.generation_id} - Preloading complete") + generation.on_preloading_complete() + + +def do_generate( + generation: ImageSingleGeneration, + prompt: str, + should_fail: bool = False, +) -> None: + while True: + if not generation.is_next_state_valid(GENERATION_PROGRESS.GENERATING): + logger.error(f"{generation.generation_id} - Generation failed.") + break + + generation.on_generating() + logger.info(f"{generation.generation_id} - Generating image for prompt: {prompt}") + + if should_fail: + time.sleep(0.5) + generation.on_error( + failed_message="Simulated generation failure", + failure_exception=RuntimeError("Simulated generation failure"), + ) + logger.error(f"{generation.generation_id} - Generation error") + continue + + time.sleep(3) + logger.info(f"{generation.generation_id} - Image generation complete") + + result = f"Generated image bytes for prompt: {prompt}".encode() + generation.set_work_result(result) + generation.on_generation_work_complete() + break + + +def do_safety_checks(generation: ImageSingleGeneration) -> None: + generation.on_safety_checking() + logger.info(f"{generation.generation_id} - Performing safety check on generated images") + for idx, generation_result in enumerate(generation.generation_results.values()): + if generation_result is not None: + time.sleep(2) + safety_result = SafetyResult( + is_csam=False, + is_nsfw=random.choice([True, False]), + ) + logger.info(f"{generation.generation_id} - Safety check complete for result index {idx}") + generation.on_safety_check_complete(batch_index=idx, safety_result=safety_result) + if safety_result.is_nsfw: + logger.warning(f"{generation.generation_id} - Image flagged as NSFW for result index {idx}") + + +def do_submit(generation: ImageSingleGeneration) -> None: + generation.on_submitting() + for idx, generation_result in enumerate(generation.generation_results.values()): + if generation_result is not None: + logger.info(f"{generation.generation_id} - Submitting generated image for result index {idx}") + time.sleep(1) + logger.info(f"{generation.generation_id} - Submission complete for result index {idx}") + else: + logger.warning( + f"{generation.generation_id} - No generated image to submit for result index {idx}. 
Was it censored?", + ) + + generation.on_submit_complete() + logger.success(f"{generation.generation_id} - Submission complete") + + +def main() -> None: + # Define multiple prompts for batch generation + prompts = [ + "A beautiful landscape with mountains and a river", + "A futuristic cityscape at night", + "A serene beach during sunset", + "A bustling market in a medieval town", + "A close-up of a flower in bloom", + "A majestic lion resting in the savannah", + ] + + result_ids = [str(uuid4()) for _ in prompts] + print(f"Expected result IDs: {result_ids}") + + generation_params_list = [ + ImageGenerationParameters( + result_ids=[result_id], + base_params=BasicImageGenerationParameters(prompt=prompt, model="stable_diffusion"), + ) + for result_id, prompt in zip(result_ids, prompts, strict=False) + ] + + for generation_params in generation_params_list: + generation_id = str(uuid4()) + logger.info(f"Generation ID: {generation_id}") + + generation = ImageSingleGeneration( + generation_parameters=generation_params, + generation_id=generation_id, + safety_rules=SafetyRules( + should_censor_nsfw=random.choice([True, False, False]), + ), + ) + + logger.info(f"{generation.generation_id} - Starting generation process") + + do_preload(generation) + + prompt = generation_params.base_params.prompt + + do_generate(generation, prompt, should_fail=random.choice([True, False, False, False])) + + if all(generation_result is None for generation_result in generation.generation_results): + logger.error(f"{generation.generation_id} - All generation failed. Aborting.") + generation.on_abort( + failed_message="All generation attempts failed.", + ) + logger.info(f"{generation.generation_id} - Generation process aborted") + continue + + do_safety_checks(generation) + + do_submit(generation) + logger.info(f"{generation.generation_id} - Finalized") + + +if __name__ == "__main__": + main() diff --git a/examples/worker/generations/image_generation_minimal.py b/examples/worker/generations/image_generation_minimal.py new file mode 100644 index 00000000..0aff6567 --- /dev/null +++ b/examples/worker/generations/image_generation_minimal.py @@ -0,0 +1,110 @@ +""" +This is a bare bones example of how to use the generation classes from horde-sdk. + +This example demonstrates a basic image generation workflow, including preloading resources, +generating an image, performing a safety check, and submitting the result. + +Note that this example does not include error handling or advanced features. See the +`image_generation_advanced.py` example for a more comprehensive workflow. 
+""" + +import time +from uuid import uuid4 + +from loguru import logger + +from horde_sdk.generation_parameters.image import BasicImageGenerationParameters, ImageGenerationParameters +from horde_sdk.safety import SafetyResult +from horde_sdk.worker.generations import ImageSingleGeneration + + +def do_preload() -> None: + logger.info("Preloading resources for image generation") + time.sleep(2) + logger.info("Preloading complete") + + +def do_generate() -> bytes: + logger.info("Generating image") + time.sleep(3) + logger.info("Image generation complete") + + return b"Generated image bytes here" + + +def do_safety_check(result: bytes) -> SafetyResult: + logger.info("Performing safety check on generated image") + time.sleep(2) + logger.info("Safety check complete") + + return SafetyResult( + is_csam=False, + is_nsfw=False, + ) + + +def do_submit() -> None: + logger.info("Submitting generated image") + time.sleep(2) + logger.info("Submission complete") + + +def main() -> None: + # Define generation parameters + result_ids = [str(uuid4())] + + print(f"Expected result IDs: {result_ids}") + + generation_params = ImageGenerationParameters( + result_ids=result_ids, + base_params=BasicImageGenerationParameters( + prompt="A beautiful landscape with mountains and a river", + model="stable_diffusion", + ), + ) + + generation_id = str(uuid4()) + print(f"Generation ID: {generation_id}") + + # Initialize the generation + generation = ImageSingleGeneration( + generation_parameters=generation_params, + generation_id=generation_id, + ) + + logger.info(f"{generation.generation_id} - {generation.get_generation_progress()}") + + # Preload resources + generation.on_preloading() + logger.info(f"{generation.generation_id} - {generation.get_generation_progress()}") + do_preload() + generation.on_preloading_complete() + logger.info(f"{generation.generation_id} - {generation.get_generation_progress()}") + + # Generate the image + generation.on_generating() + logger.info(f"{generation.generation_id} - {generation.get_generation_progress()}") + generation.set_work_result(do_generate()) + generation.on_generation_work_complete() + logger.info(f"{generation.generation_id} - {generation.get_generation_progress()}") + + # Perform safety check + generation.on_safety_checking() + logger.info(f"{generation.generation_id} - {generation.get_generation_progress()}") + + for idx, generation_result in enumerate(generation.generation_results.values()): + if generation_result is not None: + safety_result = do_safety_check(generation_result) + generation.on_safety_check_complete( + batch_index=idx, + safety_result=safety_result, + ) + + # Submit the generation + generation.on_submitting() + logger.info(f"{generation.generation_id} - {generation.get_generation_progress()}") + do_submit() + + +if __name__ == "__main__": + main() diff --git a/horde_sdk/__init__.py b/horde_sdk/__init__.py index b0bb4a26..726b3e9e 100644 --- a/horde_sdk/__init__.py +++ b/horde_sdk/__init__.py @@ -2,9 +2,13 @@ # isort: off # We import dotenv first so that we can use it to load environment variables before importing anything else. +import asyncio import ssl import certifi import dotenv +import sys + +import aiohttp.client_exceptions # If the current working directory contains a `.env` file, import the environment variables from it. # This is useful for development. 
@@ -15,6 +19,7 @@ # We import the horde_sdk logging module first so that we can use it to configure the logging system before importing from horde_sdk.horde_logging import COMPLETE_LOGGER_LABEL, PROGRESS_LOGGER_LABEL + from loguru import logger # isort: on @@ -63,12 +68,21 @@ def _dev_env_var_warnings() -> None: # pragma: no cover _dev_env_var_warnings() _default_sslcontext = ssl.create_default_context(cafile=certifi.where()) +_async_client_exceptions: tuple[type[Exception], ...] = (TimeoutError, aiohttp.client_exceptions.ClientError, OSError) + +if sys.version_info[:2] == (3, 10): + _async_client_exceptions = (asyncio.exceptions.TimeoutError, aiohttp.client_exceptions.ClientError, OSError) + +# It is important to load `horde_sdk.consts` here (and before `horde_sdk.generic_api`), after env vars are loaded +# so the CI will work as intended. See `get_default_frozen_model_config_dict` for more details. from horde_sdk.consts import ( + KNOWN_DISPATCH_SOURCE, PAYLOAD_HTTP_METHODS, HTTPMethod, HTTPStatusCode, get_all_error_status_codes, get_all_success_status_codes, + get_default_frozen_model_config_dict, is_error_status_code, is_success_status_code, ) @@ -80,36 +94,41 @@ def _dev_env_var_warnings() -> None: # pragma: no cover HordeAPIMessage, HordeAPIObject, HordeRequest, + MessageSpecifiesUserIDMixin, RequestErrorResponse, - RequestSpecifiesUserIDMixin, RequestUsesWorkerMixin, ResponseRequiringFollowUpMixin, ResponseWithProgressMixin, ) from horde_sdk.generic_api.consts import ANON_API_KEY +from horde_sdk.utils import create_bridge_agent_string __all__ = [ + "ANON_API_KEY", + "COMPLETE_LOGGER_LABEL", + "KNOWN_DISPATCH_SOURCE", "PAYLOAD_HTTP_METHODS", - "HTTPMethod", - "HTTPStatusCode", - "get_all_error_status_codes", - "get_all_success_status_codes", - "is_error_status_code", - "is_success_status_code", + "PROGRESS_LOGGER_LABEL", "APIKeyAllowedInRequestMixin", - "HordeRequest", "ContainsMessageResponseMixin", + "HTTPMethod", + "HTTPStatusCode", "HordeAPIData", "HordeAPIMessage", "HordeAPIObject", + "HordeException", + "HordeRequest", + "MessageSpecifiesUserIDMixin", "RequestErrorResponse", - "RequestSpecifiesUserIDMixin", "RequestUsesWorkerMixin", "ResponseRequiringFollowUpMixin", "ResponseWithProgressMixin", - "ANON_API_KEY", - "PROGRESS_LOGGER_LABEL", - "COMPLETE_LOGGER_LABEL", - "HordeException", + "_async_client_exceptions", "_default_sslcontext", + "create_bridge_agent_string", + "get_all_error_status_codes", + "get_all_success_status_codes", + "get_default_frozen_model_config_dict", + "is_error_status_code", + "is_success_status_code", ] diff --git a/horde_sdk/_telemetry/__init__.py b/horde_sdk/_telemetry/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/horde_sdk/_telemetry/metrics.py b/horde_sdk/_telemetry/metrics.py new file mode 100644 index 00000000..82733d7e --- /dev/null +++ b/horde_sdk/_telemetry/metrics.py @@ -0,0 +1,33 @@ +import logfire + +_telemetry_client_critical_errors_counter = logfire.metric_counter( + "client_critical_errors", + unit="1", + description="The number of critical error for which there is no special handling", +) + +_telemetry_client_horde_api_errors_counter = logfire.metric_counter( + "client_horde_api_errors", + unit="1", + description="The number of API errors", +) + +_telemetry_client_requests_started_counter = logfire.metric_counter( + "client_requests_started", + unit="1", + description="The number of requests started", +) + +_telemetry_client_requests_finished_successfully_counter = logfire.metric_counter( + 
"client_requests_finished_successfully", + unit="1", + description="The number of requests finished", +) + + +__all__ = [ + "_telemetry_client_critical_errors_counter", + "_telemetry_client_horde_api_errors_counter", + "_telemetry_client_requests_finished_successfully_counter", + "_telemetry_client_requests_started_counter", +] diff --git a/horde_sdk/ai_horde_api/__init__.py b/horde_sdk/ai_horde_api/__init__.py index cf6aa507..6172e2b1 100644 --- a/horde_sdk/ai_horde_api/__init__.py +++ b/horde_sdk/ai_horde_api/__init__.py @@ -10,12 +10,8 @@ download_image_from_generation, ) from horde_sdk.ai_horde_api.consts import ( - ALCHEMY_FORMS, GENERATION_MAX_LIFE, GENERATION_STATE, - KNOWN_SAMPLERS, - KNOWN_SOURCE_PROCESSING, - WORKER_TYPE, ) from horde_sdk.ai_horde_api.endpoints import ( AI_HORDE_API_ENDPOINT_SUBPATH, @@ -24,35 +20,36 @@ from horde_sdk.ai_horde_api.exceptions import ( AIHordeGenerationTimedOutError, AIHordeImageValidationError, - AIHordePayloadValidationError, AIHordeRequestError, AIHordeServerException, ) -from horde_sdk.ai_horde_api.fields import ImageID, JobID, TeamID, WorkerID +from horde_sdk.ai_horde_api.fields import GenerationID, ImageID, TeamID, WorkerID +from horde_sdk.exceptions import PayloadValidationError +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_FORMS +from horde_sdk.generation_parameters.image.consts import KNOWN_IMAGE_SAMPLERS, KNOWN_IMAGE_SOURCE_PROCESSING __all__ = [ - "AIHordeAPIManualClient", - "AIHordeAPIClientSession", - "AIHordeAPIAsyncManualClient", - "AIHordeAPIAsyncClientSession", - "AIHordeAPISimpleClient", - "AIHordeAPIAsyncSimpleClient", - "download_image_from_generation", - "GENERATION_MAX_LIFE", - "AI_HORDE_BASE_URL", "AI_HORDE_API_ENDPOINT_SUBPATH", - "ALCHEMY_FORMS", + "AI_HORDE_BASE_URL", + "GENERATION_MAX_LIFE", "GENERATION_STATE", - "KNOWN_SAMPLERS", - "KNOWN_SOURCE_PROCESSING", - "WORKER_TYPE", - "AIHordeRequestError", - "AIHordeImageValidationError", + "KNOWN_ALCHEMY_FORMS", + "KNOWN_IMAGE_SAMPLERS", + "KNOWN_IMAGE_SOURCE_PROCESSING", + "AIHordeAPIAsyncClientSession", + "AIHordeAPIAsyncManualClient", + "AIHordeAPIAsyncSimpleClient", + "AIHordeAPIClientSession", + "AIHordeAPIManualClient", + "AIHordeAPISimpleClient", "AIHordeGenerationTimedOutError", + "AIHordeImageValidationError", + "AIHordeRequestError", "AIHordeServerException", - "AIHordePayloadValidationError", + "GenerationID", "ImageID", - "JobID", + "PayloadValidationError", "TeamID", "WorkerID", + "download_image_from_generation", ] diff --git a/horde_sdk/ai_horde_api/ai_horde_clients.py b/horde_sdk/ai_horde_api/ai_horde_clients.py index 86fdebea..a1005e00 100644 --- a/horde_sdk/ai_horde_api/ai_horde_clients.py +++ b/horde_sdk/ai_horde_api/ai_horde_clients.py @@ -14,6 +14,7 @@ from typing import cast import aiohttp +import logfire import PIL.Image import requests from loguru import logger @@ -64,7 +65,7 @@ from horde_sdk.ai_horde_api.consts import GENERATION_MAX_LIFE, MODEL_STATE, PROGRESS_STATE from horde_sdk.ai_horde_api.endpoints import AI_HORDE_BASE_URL from horde_sdk.ai_horde_api.exceptions import AIHordeImageValidationError, AIHordeRequestError -from horde_sdk.ai_horde_api.fields import JobID, WorkerID +from horde_sdk.ai_horde_api.fields import GenerationID, WorkerID from horde_sdk.ai_horde_api.metadata import AIHordePathData, AIHordeQueryData from horde_sdk.generic_api.apimodels import ( ContainsMessageResponseMixin, @@ -82,32 +83,6 @@ ) -def parse_image_from_base64(image_base64: str) -> PIL.Image.Image: - """Parse an image from base 64. 
- - Args: - image_base64 (str): The base 64 encoded image. - - Returns: - PIL.Image.Image: The parsed image. - - Raises: - binascii.Error: If the image couldn't be parsed from base 64. - RuntimeError: If the image couldn't be parsed for any other reason. - - """ - try: - image_bytes = base64.b64decode(image_base64) - except Exception as e: - logger.error(f"Error parsing image: {e}") - raise e - - if image_bytes is None: - raise RuntimeError("Error parsing image") - - return PIL.Image.open(io.BytesIO(image_bytes)) - - def download_image_bytes(url: str) -> io.BytesIO: """Download an image from a URL. @@ -222,19 +197,19 @@ def __init__(self) -> None: def get_generate_check( self, - job_id: JobID, + gen_id: GenerationID, ) -> ImageGenerateCheckResponse: """Check if a pending image request has finished generating from the AI-Horde API. Not to be confused with `get_generate_status` which returns the images too. Args: - job_id (JobID | str): The ID of the request to check. + gen_id (GenerationID | str): The ID of the request to check. Returns: ImageGenerateCheckResponse: The response from the API. """ - api_request = ImageGenerateCheckRequest(id=job_id) + api_request = ImageGenerateCheckRequest(id=gen_id) api_response = self.submit_request(api_request, api_request.get_default_success_response_type()) if isinstance(api_response, RequestErrorResponse): # pragma: no cover @@ -245,7 +220,7 @@ def get_generate_check( def get_generate_status( self, - job_id: JobID, + gen_id: GenerationID, ) -> ImageGenerateStatusResponse: """Get the status and any generated images for a pending image request from the AI-Horde API. @@ -253,12 +228,12 @@ Use `get_generate_check` instead to check the status of a pending image request. Args: - job_id (JobID): The ID of the request to check. + gen_id (GenerationID): The ID of the request to check. Returns: - tuple[ImageGenerateStatusResponse, JobID]: The final status response and the corresponding job ID. + ImageGenerateStatusResponse: The response from the API. """ - api_request = ImageGenerateStatusRequest(id=job_id) + api_request = ImageGenerateStatusRequest(id=gen_id) api_response = self.submit_request(api_request, api_request.get_default_success_response_type()) if isinstance(api_response, RequestErrorResponse): # pragma: no cover @@ -269,17 +244,17 @@ def get_generate_status( def delete_pending_image( self, - job_id: JobID, + gen_id: GenerationID, ) -> ImageGenerateStatusResponse: """Delete a pending image request from the AI-Horde API. Args: - job_id (JobID): The ID of the request to delete. + gen_id (GenerationID): The ID of the request to delete. Returns: ImageGenerateStatusResponse: The response from the API. """ - api_request = DeleteImageGenerateRequest(id=job_id) + api_request = DeleteImageGenerateRequest(id=gen_id) api_response = self.submit_request(api_request, api_request.get_default_success_response_type()) if isinstance(api_response, RequestErrorResponse): # pragma: no cover @@ -308,19 +283,19 @@ def __init__( async def get_generate_check( self, - job_id: JobID, + gen_id: GenerationID, ) -> ImageGenerateCheckResponse: """Asynchronously check if a pending image request has finished generating and return the status of it. Not to be confused with `get_generate_status` which returns the images too. Args: - job_id (JobID | str): The ID of the request to check. + gen_id (GenerationID | str): The ID of the request to check. Returns: ImageGenerateCheckResponse: The response from the API.
""" - api_request = ImageGenerateCheckRequest(id=job_id) + api_request = ImageGenerateCheckRequest(id=gen_id) api_response = await self.submit_request(api_request, api_request.get_default_success_response_type()) if isinstance(api_response, RequestErrorResponse): # pragma: no cover @@ -331,7 +306,7 @@ async def get_generate_check( async def get_generate_status( self, - job_id: JobID, + gen_id: GenerationID, ) -> ImageGenerateStatusResponse: """Asynchronously get the status and any generated images for a pending image request from the AI-Horde API. @@ -339,12 +314,12 @@ async def get_generate_status( Use `get_generate_check` instead to check the status of a pending image request. Args: - job_id (JobID): The ID of the request to check. + gen_id (GenerationID): The ID of the request to check. Returns: ImageGenerateStatusResponse: The response from the API. """ - api_request = ImageGenerateStatusRequest(id=job_id) + api_request = ImageGenerateStatusRequest(id=gen_id) api_response = await self.submit_request(api_request, api_request.get_default_success_response_type()) if isinstance(api_response, RequestErrorResponse): # pragma: no cover @@ -355,17 +330,17 @@ async def get_generate_status( async def delete_pending_image( self, - job_id: JobID, + gen_id: GenerationID, ) -> ImageGenerateStatusResponse: """Asynchronously delete a pending image request from the AI-Horde API. Args: - job_id (JobID | str): The ID of the request to delete. + gen_id (GenerationID | str): The ID of the request to delete. Returns: ImageGenerateStatusResponse: The response from the API. """ - api_request = DeleteImageGenerateRequest(id=job_id) + api_request = DeleteImageGenerateRequest(id=gen_id) api_response = await self.submit_request(api_request, api_request.get_default_success_response_type()) if isinstance(api_response, RequestErrorResponse): # pragma: no cover @@ -418,6 +393,8 @@ def __init__( class BaseAIHordeSimpleClient(ABC): """The base class for the most straightforward clients which interact with the AI-Horde API.""" + _msg_format_sleep = "sleeping for {seconds} seconds" + reasonable_minimum_timeout = 20 def validate_timeout( @@ -457,7 +434,7 @@ def validate_timeout( def download_image_from_generation( self, generation: ImageGeneration, - ) -> PIL.Image.Image | Coroutine[None, None, tuple[PIL.Image.Image, JobID]]: + ) -> PIL.Image.Image | Coroutine[None, None, tuple[PIL.Image.Image, GenerationID]]: """Convert from base64 or download an image from a response.""" @abstractmethod @@ -467,27 +444,10 @@ def download_image_from_url( ) -> PIL.Image.Image | Coroutine[None, None, PIL.Image.Image]: """Download an image from a URL.""" - @staticmethod - def parse_image_from_base64(image_base64: str) -> PIL.Image.Image: - """Parse an image from base 64. - - Args: - image_base64 (str): The base 64 encoded image. - - Returns: - PIL.Image.Image: The parsed image. - - Raises: - binascii.Error: If the image couldn't be parsed from base 64. - RuntimeError: If the image couldn't be parsed for any other reason. - - """ - return parse_image_from_base64(image_base64) - def _handle_initial_response( self, initial_response: HordeResponse | RequestErrorResponse, - ) -> tuple[HordeRequest, JobID, list[dict[str, object]]]: + ) -> tuple[HordeRequest, GenerationID, list[dict[str, object]]]: # Check for error responses if isinstance(initial_response, RequestErrorResponse): # pragma: no cover if "Image validation failed" in initial_response.message: # TODO: No magic strings! 
@@ -517,22 +477,22 @@ def _handle_initial_response( f"Check request type is not a JobRequestMixin: {check_request.log_safe_model_dump()}", ) - job_id: JobID = check_request.id_ + gen_id: GenerationID = check_request.id_ logger.log(PROGRESS_LOGGER_LABEL, f"Response received: {initial_response}") if isinstance(initial_response, ContainsMessageResponseMixin) and initial_response.message: if "warning" in initial_response.message.lower(): - logger.warning(f"{job_id}: {initial_response.message}") + logger.warning(f"{gen_id}: {initial_response.message}") else: - logger.info(f"{job_id}: {initial_response.message}") + logger.info(f"{gen_id}: {initial_response.message}") - return check_request, job_id, follow_up_data + return check_request, gen_id, follow_up_data def _handle_progress_response( self, check_request: HordeRequest, check_response: HordeResponse | RequestErrorResponse, - job_id: JobID, + gen_id: GenerationID, *, check_count: int, number_of_responses: int, @@ -563,27 +523,28 @@ def _handle_progress_response( check_callback(check_response) # Log a message indicating that the request has been checked - log_message = f"Checked request: {job_id}, is_possible: {check_response.is_job_possible()}" + log_message = f"Checked request: {gen_id}, is_possible: {check_response.is_job_possible()}" # Log the request if it's the first check or every 5th check if check_count == 1 or check_count % 5 == 0: logger.log(PROGRESS_LOGGER_LABEL, log_message) - logger.log(PROGRESS_LOGGER_LABEL, f"{job_id}: {check_response.log_safe_model_dump()}") + logger.log(PROGRESS_LOGGER_LABEL, f"{gen_id}: {check_response.log_safe_model_dump()}") if not check_response.is_job_possible(): - logger.warning(f"Job not possible: {job_id}") + logger.warning(f"Job not possible: {gen_id}") # Otherwise, just log the message at the debug level else: logger.debug(log_message) # If the number of finished images is equal to the number of images requested, we're done if check_response.is_job_complete(number_of_responses): - logger.log(PROGRESS_LOGGER_LABEL, f"Job finished and available on the server: {job_id}") + logger.log(PROGRESS_LOGGER_LABEL, f"Job finished and available on the server: {gen_id}") return PROGRESS_STATE.finished # If we've timed out, stop waiting, log a warning, and break out of the loop if timeout and timeout > 0 and time.time() - start_time > timeout: logger.warning( - f"Timeout reached, cancelling generations still outstanding: {job_id}: {check_response}:", + f"Timeout reached, cancelling generations still outstanding: {gen_id}: " + f"{check_response.log_safe_model_dump()}:", ) return PROGRESS_STATE.timed_out @@ -628,6 +589,7 @@ def download_image_from_url(self, url: str) -> PIL.Image.Image: """ return download_image_from_url(url) + @logfire.instrument() def _do_request_with_check( self, api_request: BaseAIHordeRequest, @@ -636,7 +598,7 @@ def _do_request_with_check( timeout: int = GENERATION_MAX_LIFE, check_callback: Callable[[HordeResponse], None] | None = None, check_callback_type: type[ResponseWithProgressMixin | ResponseGenerationProgressCombinedMixin] | None = None, - ) -> tuple[HordeResponse, JobID]: + ) -> tuple[HordeResponse, GenerationID]: """Submit a request which requires check/status polling to the AI-Horde API, and wait for it to complete. Args: @@ -649,14 +611,15 @@ def _do_request_with_check( The type of response expected by the callback. Returns: - tuple[HordeResponse, JobID]: The final response and the corresponding job ID. 
+ tuple[HordeResponse, GenerationID]: The final response and the corresponding job ID. """ if check_callback is not None and len(inspect.getfullargspec(check_callback).args) == 0: raise ValueError("Callback must take at least one argument") + logger.debug("Starting request with check") + # This session class will cleanup incomplete requests in the event of an exception with AIHordeAPIClientSession() as horde_session: - # Submit the initial request logger.debug( f"Submitting request: {api_request.log_safe_model_dump()} with timeout {timeout}", ) @@ -666,7 +629,7 @@ ) # Handle the initial response to get the check request, job ID, and follow-up data - check_request, job_id, follow_up_data = self._handle_initial_response(initial_response) + check_request, gen_id, follow_up_data = self._handle_initial_response(initial_response) # There is a rate limit, so we start a clock to keep track of how long we've been waiting start_time = time.time() @@ -687,7 +650,7 @@ progress_state = self._handle_progress_response( check_request, check_response, - job_id, + gen_id, check_count=check_count, number_of_responses=number_of_responses, start_time=start_time, @@ -700,7 +663,9 @@ break # Wait for 4 seconds before checking again - time.sleep(4) + sleep_time = 4 + with logfire.span(self._msg_format_sleep.format(seconds=sleep_time), sleep_time=sleep_time): + time.sleep(sleep_time) # Check if the check response has progress if not isinstance(check_response, ResponseWithProgressMixin): @@ -729,10 +694,10 @@ raise AIHordeRequestError(final_response) # Log a message indicating that the request is complete - logger.log(COMPLETE_LOGGER_LABEL, f"Request complete: {job_id}") + logger.log(COMPLETE_LOGGER_LABEL, f"Request complete: {gen_id}") # Return the final response and job ID - return (final_response, job_id) + return (final_response, gen_id) # If there is an exception, log an error and raise a RuntimeError logger.error("Something went wrong with the request:") @@ -764,7 +729,7 @@ def image_generate_request( image_gen_request: ImageGenerateAsyncRequest, timeout: int = GENERATION_MAX_LIFE, check_callback: Callable[[ImageGenerateCheckResponse], None] | None = None, - ) -> tuple[ImageGenerateStatusResponse, JobID]: + ) -> tuple[ImageGenerateStatusResponse, GenerationID]: """Submit an image generation request to the AI-Horde API, and wait for it to complete. Args: @@ -791,7 +756,7 @@ n = image_gen_request.params.n if image_gen_request.params and image_gen_request.params.n else 1 logger.log(PROGRESS_LOGGER_LABEL, f"Requesting {n} images.") - final_response, JobID = self._do_request_with_check( + final_response, gen_id = self._do_request_with_check( image_gen_request, number_of_responses=n, timeout=timeout, @@ -806,7 +771,7 @@ if not isinstance(final_response, ImageGenerateStatusResponse): # pragma: no cover raise RuntimeError("Response was not an ImageGenerateStatusResponse") - return (final_response, JobID) + return (final_response, gen_id) def image_generate_request_dry_run( self, @@ -842,7 +807,7 @@ def alchemy_request( alchemy_request: AlchemyAsyncRequest, timeout: int = GENERATION_MAX_LIFE, check_callback: Callable[[AlchemyStatusResponse], None] | None = None, - ) -> tuple[AlchemyStatusResponse, JobID]: + ) -> tuple[AlchemyStatusResponse, GenerationID]: """Submit an alchemy request to the AI-Horde API, and wait for it to complete.
Args: @@ -869,7 +834,7 @@ def alchemy_request( for form in alchemy_request.forms: logger.debug(f"Request: {form}") - response, job_id = self._do_request_with_check( + response, gen_id = self._do_request_with_check( alchemy_request, number_of_responses=len(alchemy_request.forms), timeout=timeout, @@ -882,14 +847,14 @@ def alchemy_request( if not isinstance(response, AlchemyStatusResponse): # pragma: no cover raise RuntimeError("Response was not an AlchemyAsyncResponse") - return (response, job_id) + return (response, gen_id) def text_generate_request( self, text_gen_request: TextGenerateAsyncRequest, timeout: int = GENERATION_MAX_LIFE, check_callback: Callable[[TextGenerateStatusResponse], None] | None = None, - ) -> tuple[TextGenerateStatusResponse, JobID]: + ) -> tuple[TextGenerateStatusResponse, GenerationID]: """Submit a text generation request to the AI-Horde API, and wait for it to complete. Args: @@ -920,7 +885,7 @@ def text_generate_request( logger.log(PROGRESS_LOGGER_LABEL, f"Requesting {num_gens_requested} text generation.") logger.debug(f"Request: {text_gen_request}") - response, job_id = self._do_request_with_check( + response, gen_id = self._do_request_with_check( text_gen_request, number_of_responses=1, timeout=timeout, @@ -934,7 +899,7 @@ def text_generate_request( if not isinstance(response, TextGenerateStatusResponse): # pragma: no cover raise RuntimeError("Response was not a TextGenerateStatusResponse") - return (response, job_id) + return (response, gen_id) def text_generate_request_dry_run( self, @@ -1056,7 +1021,6 @@ def worker_modify( """Update a worker. Args: - worker_id (WorkerID): The ID of the worker to update. modify_worker_request (ModifyWorkerRequest): The request to update the worker. Returns: @@ -1275,7 +1239,10 @@ def __init__( self._horde_client_session = horde_client_session self._aiohttp_session = horde_client_session._aiohttp_session - async def download_image_from_generation(self, generation: ImageGeneration) -> tuple[PIL.Image.Image, JobID]: + async def download_image_from_generation( + self, + generation: ImageGeneration, + ) -> tuple[PIL.Image.Image, GenerationID]: """Asynchronously convert from base64 or download an image from a response. Args: @@ -1346,15 +1313,16 @@ async def download_image_from_url(self, url: str) -> PIL.Image.Image: return PIL.Image.open(io.BytesIO(image_bytes)) + @logfire.instrument() async def _do_request_with_check( self, api_request: BaseAIHordeRequest, *, number_of_responses: int = 1, - timeout: int = GENERATION_MAX_LIFE, + timeout: int = GENERATION_MAX_LIFE, # noqa: ASYNC109 # FIXME check_callback: Callable[[HordeResponse], None] | None = None, check_callback_type: type[ResponseWithProgressMixin | ResponseGenerationProgressCombinedMixin] | None = None, - ) -> tuple[HordeResponse, JobID]: + ) -> tuple[HordeResponse, GenerationID]: """Submit a request which requires check/status polling to the AI-Horde API, and wait for it to complete. Args: @@ -1367,7 +1335,7 @@ async def _do_request_with_check( The type of response expected by the callback. Returns: - tuple[HordeResponse, JobID]: The final response and the corresponding job ID. + tuple[HordeResponse, GenerationID]: The final response and the corresponding job ID. Raises: AIHordeRequestError: If the request failed. The error response is included in the exception. 
@@ -1375,7 +1343,7 @@ async def _do_request_with_check( if check_callback is not None and len(inspect.getfullargspec(check_callback).args) == 0: raise ValueError("Callback must take at least one argument") - # This session class will cleanup incomplete requests in the event of an exception + logger.debug("Starting async request with check.") # Submit the initial request logger.debug( @@ -1387,7 +1355,7 @@ async def _do_request_with_check( ) # Handle the initial response to get the check request, job ID, and follow-up data - check_request, job_id, follow_up_data = self._handle_initial_response(initial_response) + check_request, gen_id, follow_up_data = self._handle_initial_response(initial_response) # There is a rate limit, so we start a clock to keep track of how long we've been waiting start_time = time.time() @@ -1408,7 +1376,7 @@ async def _do_request_with_check( progress_state = self._handle_progress_response( check_request, check_response, - job_id, + gen_id, check_count=check_count, number_of_responses=number_of_responses, start_time=start_time, @@ -1421,7 +1389,9 @@ async def _do_request_with_check( break # Wait for 4 seconds before checking again - await asyncio.sleep(4) + sleep_time = 4 + with logfire.span(self._msg_format_sleep.format(seconds=sleep_time), sleep_time=sleep_time): + await asyncio.sleep(sleep_time) # This is for type safety, but should never happen in production if not isinstance(check_response, ResponseWithProgressMixin): # pragma: no cover @@ -1455,10 +1425,10 @@ async def _do_request_with_check( raise AIHordeRequestError(final_response) # Log a message indicating that the request is complete - logger.log(COMPLETE_LOGGER_LABEL, f"Request complete: {job_id}") + logger.log(COMPLETE_LOGGER_LABEL, f"Request complete: {gen_id}") # Return the final response and job ID - return (final_response, job_id) + return (final_response, gen_id) async def heartbeat_request( self, @@ -1486,10 +1456,10 @@ async def heartbeat_request( async def image_generate_request( self, image_gen_request: ImageGenerateAsyncRequest, - timeout: int = GENERATION_MAX_LIFE, + timeout: int = GENERATION_MAX_LIFE, # noqa: ASYNC109 # FIXME check_callback: Callable[[ImageGenerateCheckResponse], None] | None = None, delay: float = 0.0, - ) -> tuple[ImageGenerateStatusResponse, JobID]: + ) -> tuple[ImageGenerateStatusResponse, GenerationID]: """Submit an image generation request to the AI-Horde API, and wait for it to complete. *Be warned* that using this method too frequently could trigger a rate limit from the AI-Horde API. @@ -1506,7 +1476,7 @@ async def image_generate_request( Returns: - tuple[ImageGenerateStatusResponse, JobID]: The final status response and the corresponding job ID. + tuple[ImageGenerateStatusResponse, GenerationID]: The final status response and the corresponding job ID. Raises: AIHordeRequestError: If the request failed. The error response is included in the exception. 
@@ -1521,7 +1491,7 @@ async def image_generate_request( timeout = self.validate_timeout(timeout, log_message=True) n = image_gen_request.params.n if image_gen_request.params and image_gen_request.params.n else 1 - final_response, job_id = await self._do_request_with_check( + final_response, gen_id = await self._do_request_with_check( image_gen_request, number_of_responses=n, timeout=timeout, @@ -1535,7 +1505,7 @@ async def image_generate_request( if not isinstance(final_response, ImageGenerateStatusResponse): # pragma: no cover raise RuntimeError("Response was not an ImageGenerateStatusResponse") - return (final_response, job_id) + return (final_response, gen_id) async def image_generate_request_dry_run( self, @@ -1572,9 +1542,9 @@ async def image_generate_request_dry_run( async def alchemy_request( self, alchemy_request: AlchemyAsyncRequest, - timeout: int = GENERATION_MAX_LIFE, + timeout: int = GENERATION_MAX_LIFE, # noqa: ASYNC109 # FIXME check_callback: Callable[[AlchemyStatusResponse], None] | None = None, - ) -> tuple[AlchemyStatusResponse, JobID]: + ) -> tuple[AlchemyStatusResponse, GenerationID]: """Submit an alchemy request to the AI-Horde API, and wait for it to complete. *Be warned* that using this method too frequently could trigger a rate limit from the AI-Horde API. @@ -1588,7 +1558,7 @@ async def alchemy_request( response. Returns: - tuple[ImageGenerateStatusResponse, JobID]: The final status response and the corresponding job ID. + tuple[AlchemyStatusResponse, GenerationID]: The final status response and the corresponding job ID. Raises: AIHordeRequestError: If the request failed. The error response is included in the exception. @@ -1600,7 +1570,7 @@ timeout = self.validate_timeout(timeout, log_message=True) - response, job_id = await self._do_request_with_check( + response, gen_id = await self._do_request_with_check( alchemy_request, number_of_responses=len(alchemy_request.forms), timeout=timeout, @@ -1613,15 +1583,15 @@ if not isinstance(response, AlchemyStatusResponse): # pragma: no cover raise RuntimeError("Response was not an AlchemyAsyncResponse") - return (response, job_id) + return (response, gen_id) async def text_generate_request( self, text_gen_request: TextGenerateAsyncRequest, - timeout: int = GENERATION_MAX_LIFE, + timeout: int = GENERATION_MAX_LIFE, # noqa: ASYNC109 # FIXME check_callback: Callable[[TextGenerateStatusResponse], None] | None = None, delay: float = 0.0, - ) -> tuple[TextGenerateStatusResponse, JobID]: + ) -> tuple[TextGenerateStatusResponse, GenerationID]: """Submit a text generation request to the AI-Horde API, and wait for it to complete. *Be warned* that using this method too frequently could trigger a rate limit from the AI-Horde API. @@ -1637,7 +1607,7 @@ delay (float, optional): The number of seconds to wait before checking the status. Defaults to 0.0. Returns: - tuple[TextGenerateStatusResponse, JobID]: The final status response and the corresponding job ID. + tuple[TextGenerateStatusResponse, GenerationID]: The final status response and the corresponding job ID. Raises: AIHordeRequestError: If the request failed. The error response is included in the exception.
@@ -1659,7 +1629,7 @@ async def text_generate_request( logger.log(PROGRESS_LOGGER_LABEL, f"Requesting {num_gens_requested} text generation.") logger.debug(f"Request: {text_gen_request}") - response, job_id = await self._do_request_with_check( + response, gen_id = await self._do_request_with_check( text_gen_request, number_of_responses=1, timeout=timeout, @@ -1673,7 +1643,7 @@ async def text_generate_request( if not isinstance(response, TextGenerateStatusResponse): # pragma: no cover raise RuntimeError("Response was not a TextGenerateStatusResponse") - return (response, job_id) + return (response, gen_id) async def text_generate_request_dry_run( self, @@ -1766,7 +1736,6 @@ async def worker_modify( """Update a worker. Args: - worker_id (WorkerID): The ID of the worker to update. modify_worker_request (ModifyWorkerRequest): The request to update the worker. Returns: diff --git a/horde_sdk/ai_horde_api/apimodels/__init__.py b/horde_sdk/ai_horde_api/apimodels/__init__.py index 305882d4..7b7327d4 100644 --- a/horde_sdk/ai_horde_api/apimodels/__init__.py +++ b/horde_sdk/ai_horde_api/apimodels/__init__.py @@ -1,82 +1,20 @@ """All requests, responses and API models defined for the AI Horde API.""" -from horde_sdk.ai_horde_api.apimodels._documents import ( - AIHordeDocumentRequestMixin, - AIHordeGetPrivacyPolicyRequest, - AIHordeGetSponsorsRequest, - AIHordeGetTermsRequest, - DocumentFormat, - HordeDocument, -) -from horde_sdk.ai_horde_api.apimodels._find_user import ( - FindUserRequest, -) -from horde_sdk.ai_horde_api.apimodels._kudos import ( - KudosTransferRequest, - KudosTransferResponse, -) -from horde_sdk.ai_horde_api.apimodels._stats import ( - ImageStatsModelsRequest, - ImageStatsModelsResponse, - ImageStatsModelsTotalRequest, - ImageStatsModelsTotalResponse, - SinglePeriodImgStat, - SinglePeriodTxtStat, - StatsModelsTimeframe, - TextStatsModelResponse, - TextStatsModelsRequest, - TextStatsModelsTotalRequest, - TextStatsModelsTotalResponse, -) -from horde_sdk.ai_horde_api.apimodels._status import ( - ActiveModel, - ActiveModelLite, - AIHordeHeartbeatRequest, - AIHordeHeartbeatResponse, - HordeModes, - HordePerformanceRequest, - HordePerformanceResponse, - HordeStatusModelsAllRequest, - HordeStatusModelsAllResponse, - HordeStatusModelsSingleRequest, - HordeStatusModelsSingleResponse, - Newspiece, - NewsRequest, - NewsResponse, -) -from horde_sdk.ai_horde_api.apimodels._styles import ResponseModelStylesUser -from horde_sdk.ai_horde_api.apimodels._users import ( - ActiveGenerations, - ContributionsDetails, - ListUsersDetailsRequest, - ListUsersDetailsResponse, - ModifyUser, - ModifyUserReply, - ModifyUserRequest, - ModifyUserResponse, - MonthlyKudos, - SingleUserDetailsRequest, - UsageDetails, - UserAmountRecords, - UserDetailsResponse, - UserKudosDetails, - UserRecords, - UserThingRecords, - _ModifyUserBase, -) -from horde_sdk.ai_horde_api.apimodels.alchemy._async import ( +from typing import TypeVar + +from horde_sdk.ai_horde_api.apimodels.alchemy.async_ import ( AlchemyAsyncRequest, AlchemyAsyncRequestFormItem, AlchemyAsyncResponse, ) -from horde_sdk.ai_horde_api.apimodels.alchemy._pop import ( +from horde_sdk.ai_horde_api.apimodels.alchemy.pop import ( AlchemyFormPayloadStable, + AlchemyJobPopResponse, AlchemyPopFormPayload, AlchemyPopRequest, - AlchemyPopResponse, NoValidAlchemyFound, ) -from horde_sdk.ai_horde_api.apimodels.alchemy._status import ( +from horde_sdk.ai_horde_api.apimodels.alchemy.status import ( AlchemyCaptionResult, AlchemyDeleteRequest, AlchemyFormStatus, @@ -88,8 +26,10 
@@ AlchemyStatusResponse, AlchemyUpscaleResult, ) -from horde_sdk.ai_horde_api.apimodels.alchemy._submit import AlchemyJobSubmitRequest, AlchemyJobSubmitResponse +from horde_sdk.ai_horde_api.apimodels.alchemy.submit import AlchemyJobSubmitRequest, AlchemyJobSubmitResponse from horde_sdk.ai_horde_api.apimodels.base import ( + ActiveModel, + ActiveModelLite, ExtraSourceImageEntry, ExtraTextEntry, GenMetadataEntry, @@ -98,62 +38,245 @@ JobResponseMixin, JobSubmitResponse, LorasPayloadEntry, + MessageSpecifiesSharedKeyMixin, SingleWarningEntry, TIPayloadEntry, WorkerRequestMixin, WorkerRequestNameMixin, ) -from horde_sdk.ai_horde_api.apimodels.generate._async import ( +from horde_sdk.ai_horde_api.apimodels.collections import ( + AllCollectionsRequest, + AllCollectionsResponse, + CollectionByIDRequest, + CollectionByNameRequest, + CreateCollectionRequest, + CreateCollectionResponse, + DeleteCollectionRequest, + DeleteCollectionResponse, + ResponseModelCollection, + ResponseModelStylesShort, + UpdateCollectionRequest, + UpdateCollectionResponse, + _InputModelCollectionMixin, +) +from horde_sdk.ai_horde_api.apimodels.documents import ( + AIHordeDocumentRequestMixin, + AIHordeGetPrivacyPolicyRequest, + AIHordeGetSponsorsRequest, + AIHordeGetTermsRequest, + DocumentFormat, + HordeDocument, +) +from horde_sdk.ai_horde_api.apimodels.filters import ( + DeleteFilterRequest, + DeleteFilterResponse, + FilterDetails, + FilterPromptSuspicionRequest, + FilterPromptSuspicionResponse, + FilterRegex, + FilterRegexRequest, + FilterRegexResponse, + FiltersListRequest, + FiltersListResponse, + PatchExistingFilter, + PutNewFilterRequest, + SingleFilterRequest, +) +from horde_sdk.ai_horde_api.apimodels.find_user import ( + FindUserRequest, +) +from horde_sdk.ai_horde_api.apimodels.generate.async_ import ( ImageGenerateAsyncDryRunResponse, ImageGenerateAsyncRequest, ImageGenerateAsyncResponse, ImageGenerationInputPayload, ) -from horde_sdk.ai_horde_api.apimodels.generate._check import ImageGenerateCheckRequest, ImageGenerateCheckResponse -from horde_sdk.ai_horde_api.apimodels.generate._pop import ( +from horde_sdk.ai_horde_api.apimodels.generate.check import ImageGenerateCheckRequest, ImageGenerateCheckResponse +from horde_sdk.ai_horde_api.apimodels.generate.pop import ( ImageGenerateJobPopPayload, ImageGenerateJobPopRequest, ImageGenerateJobPopResponse, ImageGenerateJobPopSkippedStatus, + NoValidRequestFound, PopInput, + PopResponseModelMessage, ) -from horde_sdk.ai_horde_api.apimodels.generate._progress import ( +from horde_sdk.ai_horde_api.apimodels.generate.progress import ( ResponseGenerationProgressCombinedMixin, ResponseGenerationProgressInfoMixin, ) -from horde_sdk.ai_horde_api.apimodels.generate._status import ( +from horde_sdk.ai_horde_api.apimodels.generate.rate import ( + AestheticRating, + AestheticsPayload, + RateRequest, + RateResponse, +) +from horde_sdk.ai_horde_api.apimodels.generate.status import ( DeleteImageGenerateRequest, + Generation, ImageGenerateStatusRequest, ImageGenerateStatusResponse, ImageGeneration, ) -from horde_sdk.ai_horde_api.apimodels.generate._submit import ( +from horde_sdk.ai_horde_api.apimodels.generate.submit import ( ImageGenerationJobSubmitRequest, ) -from horde_sdk.ai_horde_api.apimodels.generate.text._async import ( +from horde_sdk.ai_horde_api.apimodels.generate.text.async_ import ( ModelGenerationInputKobold, ModelPayloadRootKobold, TextGenerateAsyncDryRunResponse, TextGenerateAsyncRequest, TextGenerateAsyncResponse, + _BasePayloadKoboldMixin, ) -from 
horde_sdk.ai_horde_api.apimodels.generate.text._pop import ( +from horde_sdk.ai_horde_api.apimodels.generate.text.pop import ( ModelPayloadKobold, NoValidRequestFoundKobold, TextGenerateJobPopRequest, TextGenerateJobPopResponse, _PopInputKobold, ) -from horde_sdk.ai_horde_api.apimodels.generate.text._status import ( +from horde_sdk.ai_horde_api.apimodels.generate.text.status import ( DeleteTextGenerateRequest, GenerationKobold, TextGenerateStatusRequest, TextGenerateStatusResponse, ) -from horde_sdk.ai_horde_api.apimodels.generate.text._submit import ( +from horde_sdk.ai_horde_api.apimodels.generate.text.submit import ( TextGenerationJobSubmitRequest, ) -from horde_sdk.ai_horde_api.apimodels.workers._workers import ( +from horde_sdk.ai_horde_api.apimodels.kudos import ( + KudosAwardRequest, + KudosAwardResponse, + KudosTransferRequest, + KudosTransferResponse, +) +from horde_sdk.ai_horde_api.apimodels.operations import ( + AllIPTimeoutsRequest, + BlockIPAddressRequest, + BlockIPAddressResponse, + BlockWorkerIPAddressRequest, + BlockWorkerIPAddressResponse, + DeleteIPAddressRequest, + DeleteIPAddressResponse, + DeleteWorkerIPAddressRequest, + DeleteWorkerIPAddressResponse, + IPTimeout, + IPTimeoutListResponse, + SingleIPTimeoutsRequest, +) +from horde_sdk.ai_horde_api.apimodels.sharedkeys import ( + ExpiryStrSharedKeyDetailsResponse, + SharedKeyCreateRequest, + SharedKeyDeleteRequest, + SharedKeyDeleteResponse, + SharedKeyDetailsRequest, + SharedKeyDetailsResponse, + SharedKeyModifyRequest, + SharedKeySettings, +) +from horde_sdk.ai_horde_api.apimodels.stats import ( + ImageStatsModelsRequest, + ImageStatsModelsResponse, + ImageStatsModelsTotalRequest, + ImageStatsModelsTotalResponse, + SinglePeriodImgStat, + SinglePeriodTxtStat, + StatsModelsTimeframe, + TextStatsModelResponse, + TextStatsModelsRequest, + TextStatsModelsTotalRequest, + TextStatsModelsTotalResponse, +) +from horde_sdk.ai_horde_api.apimodels.status import ( + AIHordeHeartbeatRequest, + AIHordeHeartbeatResponse, + HordeModes, + HordePerformanceRequest, + HordePerformanceResponse, + HordeStatusModelsAllRequest, + HordeStatusModelsAllResponse, + HordeStatusModelsSingleRequest, + HordeStatusModelsSingleResponse, + Newspiece, + NewsRequest, + NewsResponse, +) +from horde_sdk.ai_horde_api.apimodels.styles import ( + AllStylesImageRequest, + AllStylesImageResponse, + AllStylesTextRequest, + AllStylesTextResponse, + CreateStyleImageRequest, + CreateStyleTextRequest, + DeleteStyleImageRequest, + DeleteStyleImageResponse, + DeleteStyleTextRequest, + DeleteStyleTextResponse, + ModelStyleInputParamsKobold, + ModelStyleInputParamsStable, + ModifyStyleImageRequest, + ModifyStyleImageResponse, + ModifyStyleTextRequest, + ModifyStyleTextResponse, + ResponseModelStylesUser, + SingleStyleImageByIDRequest, + SingleStyleImageByNameRequest, + SingleStyleTextByIDRequest, + SingleStyleTextByNameRequest, + StyleExample, + StyleImageExampleAddRequest, + StyleImageExampleDeleteRequest, + StyleImageExampleDeleteResponse, + StyleImageExampleModifyRequest, + StyleImageExampleModifyResponse, + StyleKobold, + StyleStable, + StyleType, +) +from horde_sdk.ai_horde_api.apimodels.teams import ( + AllTeamDetailsRequest, + AllTeamDetailsResponse, + CreateTeamRequest, + DeleteTeamRequest, + DeleteTeamResponse, + ModifyTeam, + ModifyTeamInput, + ModifyTeamRequest, + SingleTeamDetailsRequest, + TeamDetails, +) +from horde_sdk.ai_horde_api.apimodels.users import ( + ActiveGenerations, + ContributionsDetails, + DeleteUserRequest, + DeleteUserResponse, + 
ListUsersDetailsRequest, + ListUsersDetailsResponse, + ModifyUser, + ModifyUserReply, + ModifyUserRequest, + ModifyUserResponse, + MonthlyKudos, + SingleUserDetailsRequest, + UsageDetails, + UserAmountRecords, + UserDetailsResponse, + UserKudosDetails, + UserRecords, + UserThingRecords, + _ModifyUserBase, +) +from horde_sdk.ai_horde_api.apimodels.workers.messages import ( + AllWorkerMessagesRequest, + CreateWorkerMessageRequest, + DeleteWorkerMessageRequest, + DeleteWorkerMessageResponse, + ResponseModelMessage, + ResponseModelMessages, + SingleWorkerMessageRequest, +) +from horde_sdk.ai_horde_api.apimodels.workers.workers import ( AllWorkersDetailsRequest, AllWorkersDetailsResponse, DeleteWorkerRequest, @@ -165,155 +288,270 @@ SingleWorkerNameDetailsRequest, TeamDetailsLite, WorkerDetailItem, + WorkerDetailLite, WorkerKudosDetails, ) -from horde_sdk.ai_horde_api.consts import KNOWN_ALCHEMY_TYPES +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES from horde_sdk.generic_api.apimodels import ( APIKeyAllowedInRequestMixin, ContainsMessageResponseMixin, - RequestSpecifiesUserIDMixin, + ContainsWarningsResponseMixin, + MessageSpecifiesUserIDMixin, RequestUsesWorkerMixin, ResponseRequiringDownloadMixin, ResponseRequiringFollowUpMixin, ResponseWithProgressMixin, ) +JobPopResponseTypeVar = TypeVar( + "JobPopResponseTypeVar", + bound=ImageGenerateJobPopResponse | TextGenerateJobPopResponse | AlchemyJobPopResponse, +) + __all__ = [ + "KNOWN_ALCHEMY_TYPES", "AIHordeDocumentRequestMixin", "AIHordeGetPrivacyPolicyRequest", "AIHordeGetSponsorsRequest", "AIHordeGetTermsRequest", - "DocumentFormat", - "HordeDocument", + "AIHordeHeartbeatRequest", + "AIHordeHeartbeatResponse", + "APIKeyAllowedInRequestMixin", "ActiveGenerations", - "ContributionsDetails", - "FindUserRequest", - "KudosTransferRequest", - "KudosTransferResponse", - "UserDetailsResponse", - "MonthlyKudos", - "UsageDetails", - "UserAmountRecords", - "UserKudosDetails", - "UserRecords", - "UserThingRecords", - "_ModifyUserBase", - "ImageStatsModelsRequest", - "ImageStatsModelsTotalRequest", - "ImageStatsModelsTotalResponse", - "HordeModes", - "HordePerformanceRequest", - "HordePerformanceResponse", - "HordeStatusModelsAllRequest", - "HordeStatusModelsAllResponse", - "HordeStatusModelsSingleRequest", - "HordeStatusModelsSingleResponse", - "Newspiece", - "NewsRequest", - "NewsResponse", - "ResponseModelStylesUser", - "ListUsersDetailsRequest", - "ListUsersDetailsResponse", - "ModifyUser", - "ModifyUserReply", - "SingleUserDetailsRequest", - "ModifyUserRequest", - "ModifyUserResponse", "ActiveModel", "ActiveModelLite", - "SinglePeriodImgStat", - "SinglePeriodTxtStat", - "ImageStatsModelsResponse", - "StatsModelsTimeframe", - "TextStatsModelResponse", - "TextStatsModelsRequest", - "TextStatsModelsTotalRequest", - "TextStatsModelsTotalResponse", - "AIHordeHeartbeatRequest", - "AIHordeHeartbeatResponse", - "KNOWN_ALCHEMY_TYPES", + "AestheticRating", + "AestheticsPayload", "AlchemyAsyncRequest", "AlchemyAsyncRequestFormItem", "AlchemyAsyncResponse", - "AlchemyFormPayloadStable", - "AlchemyPopFormPayload", - "AlchemyPopRequest", - "AlchemyPopResponse", - "NoValidAlchemyFound", "AlchemyCaptionResult", "AlchemyDeleteRequest", + "AlchemyFormPayloadStable", "AlchemyFormStatus", "AlchemyInterrogationDetails", "AlchemyInterrogationResult", "AlchemyInterrogationResultItem", + "AlchemyJobPopResponse", + "AlchemyJobSubmitRequest", + "AlchemyJobSubmitResponse", "AlchemyNSFWResult", + "AlchemyPopFormPayload", + "AlchemyPopRequest", 
"AlchemyStatusRequest", "AlchemyStatusResponse", "AlchemyUpscaleResult", - "AlchemyJobSubmitRequest", - "AlchemyJobSubmitResponse", + "AllCollectionsRequest", + "AllCollectionsResponse", + "AllIPTimeoutsRequest", + "AllStylesImageRequest", + "AllStylesImageResponse", + "AllStylesTextRequest", + "AllStylesTextResponse", + "AllTeamDetailsRequest", + "AllTeamDetailsResponse", + "AllWorkerMessagesRequest", + "AllWorkersDetailsRequest", + "AllWorkersDetailsResponse", + "BlockIPAddressRequest", + "BlockIPAddressResponse", + "BlockWorkerIPAddressRequest", + "BlockWorkerIPAddressResponse", + "CollectionByIDRequest", + "CollectionByNameRequest", + "ContainsMessageResponseMixin", + "ContainsWarningsResponseMixin", + "ContributionsDetails", + "CreateCollectionRequest", + "CreateCollectionResponse", + "CreateStyleImageRequest", + "CreateStyleTextRequest", + "CreateTeamRequest", + "CreateWorkerMessageRequest", + "DeleteCollectionRequest", + "DeleteCollectionResponse", + "DeleteFilterRequest", + "DeleteFilterResponse", + "DeleteIPAddressRequest", + "DeleteIPAddressResponse", + "DeleteImageGenerateRequest", + "DeleteStyleImageRequest", + "DeleteStyleImageResponse", + "DeleteStyleTextRequest", + "DeleteStyleTextResponse", + "DeleteTeamRequest", + "DeleteTeamResponse", + "DeleteTextGenerateRequest", + "DeleteUserRequest", + "DeleteUserResponse", + "DeleteWorkerIPAddressRequest", + "DeleteWorkerIPAddressResponse", + "DeleteWorkerMessageRequest", + "DeleteWorkerMessageResponse", + "DeleteWorkerRequest", + "DeleteWorkerResponse", + "DocumentFormat", + "ExpiryStrSharedKeyDetailsResponse", "ExtraSourceImageEntry", "ExtraTextEntry", + "FilterDetails", + "FilterPromptSuspicionRequest", + "FilterPromptSuspicionResponse", + "FilterRegex", + "FilterRegexRequest", + "FilterRegexResponse", + "FiltersListRequest", + "FiltersListResponse", + "FindUserRequest", "GenMetadataEntry", - "ImageGenerateParamMixin", - "JobRequestMixin", - "JobResponseMixin", - "LorasPayloadEntry", - "SingleWarningEntry", - "TIPayloadEntry", - "WorkerRequestMixin", - "WorkerRequestNameMixin", + "Generation", + "GenerationKobold", + "HordeDocument", + "HordeModes", + "HordePerformanceRequest", + "HordePerformanceResponse", + "HordeStatusModelsAllRequest", + "HordeStatusModelsAllResponse", + "HordeStatusModelsSingleRequest", + "HordeStatusModelsSingleResponse", + "IPTimeout", + "IPTimeoutListResponse", "ImageGenerateAsyncDryRunResponse", "ImageGenerateAsyncRequest", "ImageGenerateAsyncResponse", - "ImageGenerationInputPayload", "ImageGenerateCheckRequest", "ImageGenerateCheckResponse", "ImageGenerateJobPopPayload", "ImageGenerateJobPopRequest", "ImageGenerateJobPopResponse", "ImageGenerateJobPopSkippedStatus", - "PopInput", - "ResponseGenerationProgressCombinedMixin", - "ResponseGenerationProgressInfoMixin", - "DeleteImageGenerateRequest", + "ImageGenerateParamMixin", "ImageGenerateStatusRequest", "ImageGenerateStatusResponse", "ImageGeneration", + "ImageGenerationInputPayload", "ImageGenerationJobSubmitRequest", + "ImageStatsModelsRequest", + "ImageStatsModelsResponse", + "ImageStatsModelsTotalRequest", + "ImageStatsModelsTotalResponse", + "JobPopResponseTypeVar", + "JobRequestMixin", + "JobResponseMixin", "JobSubmitResponse", + "KudosAwardRequest", + "KudosAwardResponse", + "KudosTransferRequest", + "KudosTransferResponse", + "ListUsersDetailsRequest", + "ListUsersDetailsResponse", + "LorasPayloadEntry", + "MessageSpecifiesSharedKeyMixin", + "MessageSpecifiesUserIDMixin", "ModelGenerationInputKobold", + "ModelPayloadKobold", 
"ModelPayloadRootKobold", + "ModelStyleInputParamsKobold", + "ModelStyleInputParamsStable", + "ModifyStyleImageRequest", + "ModifyStyleImageResponse", + "ModifyStyleTextRequest", + "ModifyStyleTextResponse", + "ModifyTeam", + "ModifyTeamInput", + "ModifyTeamRequest", + "ModifyUser", + "ModifyUserReply", + "ModifyUserRequest", + "ModifyUserResponse", + "ModifyWorkerRequest", + "ModifyWorkerResponse", + "MonthlyKudos", + "NewsRequest", + "NewsResponse", + "Newspiece", + "NoValidAlchemyFound", + "NoValidRequestFound", + "NoValidRequestFoundKobold", + "PatchExistingFilter", + "PopInput", + "PopResponseModelMessage", + "PutNewFilterRequest", + "RateRequest", + "RateResponse", + "RequestUsesWorkerMixin", + "ResponseGenerationProgressCombinedMixin", + "ResponseGenerationProgressInfoMixin", + "ResponseModelCollection", + "ResponseModelMessage", + "ResponseModelMessages", + "ResponseModelStylesShort", + "ResponseModelStylesUser", + "ResponseRequiringDownloadMixin", + "ResponseRequiringFollowUpMixin", + "ResponseWithProgressMixin", + "SharedKeyCreateRequest", + "SharedKeyDeleteRequest", + "SharedKeyDeleteResponse", + "SharedKeyDetailsRequest", + "SharedKeyDetailsResponse", + "SharedKeyModifyRequest", + "SharedKeySettings", + "SingleFilterRequest", + "SingleIPTimeoutsRequest", + "SinglePeriodImgStat", + "SinglePeriodTxtStat", + "SingleStyleImageByIDRequest", + "SingleStyleImageByNameRequest", + "SingleStyleTextByIDRequest", + "SingleStyleTextByNameRequest", + "SingleTeamDetailsRequest", + "SingleUserDetailsRequest", + "SingleWarningEntry", + "SingleWorkerDetailsRequest", + "SingleWorkerDetailsResponse", + "SingleWorkerMessageRequest", + "SingleWorkerNameDetailsRequest", + "StatsModelsTimeframe", + "StyleExample", + "StyleImageExampleAddRequest", + "StyleImageExampleDeleteRequest", + "StyleImageExampleDeleteResponse", + "StyleImageExampleModifyRequest", + "StyleImageExampleModifyResponse", + "StyleKobold", + "StyleStable", + "StyleType", + "TIPayloadEntry", + "TeamDetails", + "TeamDetailsLite", + "TextGenerateAsyncDryRunResponse", "TextGenerateAsyncRequest", "TextGenerateAsyncResponse", - "ModelPayloadKobold", - "NoValidRequestFoundKobold", "TextGenerateJobPopRequest", "TextGenerateJobPopResponse", - "_PopInputKobold", - "TextGenerateAsyncDryRunResponse", - "DeleteTextGenerateRequest", - "GenerationKobold", "TextGenerateStatusRequest", "TextGenerateStatusResponse", "TextGenerationJobSubmitRequest", - "AllWorkersDetailsRequest", - "AllWorkersDetailsResponse", - "DeleteWorkerRequest", - "DeleteWorkerResponse", - "ModifyWorkerResponse", - "ModifyWorkerRequest", - "SingleWorkerNameDetailsRequest", - "SingleWorkerDetailsRequest", - "SingleWorkerDetailsResponse", - "TeamDetailsLite", + "TextStatsModelResponse", + "TextStatsModelsRequest", + "TextStatsModelsTotalRequest", + "TextStatsModelsTotalResponse", + "UpdateCollectionRequest", + "UpdateCollectionResponse", + "UsageDetails", + "UserAmountRecords", + "UserDetailsResponse", + "UserKudosDetails", + "UserRecords", + "UserThingRecords", "WorkerDetailItem", + "WorkerDetailLite", "WorkerKudosDetails", - "APIKeyAllowedInRequestMixin", - "ContainsMessageResponseMixin", - "RequestSpecifiesUserIDMixin", - "RequestUsesWorkerMixin", - "ResponseRequiringDownloadMixin", - "ResponseRequiringFollowUpMixin", - "ResponseWithProgressMixin", + "WorkerRequestMixin", + "WorkerRequestNameMixin", + "_BasePayloadKoboldMixin", + "_InputModelCollectionMixin", + "_ModifyUserBase", + "_PopInputKobold", ] diff --git a/horde_sdk/ai_horde_api/apimodels/_kudos.py 
b/horde_sdk/ai_horde_api/apimodels/_kudos.py deleted file mode 100644 index 68fea6a1..00000000 --- a/horde_sdk/ai_horde_api/apimodels/_kudos.py +++ /dev/null @@ -1,51 +0,0 @@ -from typing_extensions import override - -from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest -from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH -from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod -from horde_sdk.generic_api.apimodels import APIKeyAllowedInRequestMixin, HordeResponseBaseModel - - -class KudosTransferResponse(HordeResponseBaseModel): - transferred: float | None = None - """The amount of Kudos transferred.""" - - @override - @classmethod - def get_api_model_name(cls) -> str: - return "KudosTransferred" - - -class KudosTransferRequest( - BaseAIHordeRequest, - APIKeyAllowedInRequestMixin, -): - username: str - """The username of the user to transfer Kudos to.""" - amount: float - """The amount of Kudos to transfer.""" - - @override - @classmethod - def get_api_model_name(cls) -> str: - return _ANONYMOUS_MODEL - - @override - @classmethod - def get_http_method(cls) -> HTTPMethod: - return HTTPMethod.POST - - @override - @classmethod - def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: - return AI_HORDE_API_ENDPOINT_SUBPATH.v2_kudos_transfer - - @override - @classmethod - def get_default_success_response_type(cls) -> type[KudosTransferResponse]: - return KudosTransferResponse - - @override - @classmethod - def is_api_key_required(cls) -> bool: - return True diff --git a/horde_sdk/ai_horde_api/apimodels/_styles.py b/horde_sdk/ai_horde_api/apimodels/_styles.py deleted file mode 100644 index ae79b4af..00000000 --- a/horde_sdk/ai_horde_api/apimodels/_styles.py +++ /dev/null @@ -1,22 +0,0 @@ -from enum import auto - -from pydantic import Field -from strenum import StrEnum - -from horde_sdk.generic_api.apimodels import HordeAPIData - - -class StyleType(StrEnum): - """An enum representing the different types of styles.""" - - image = auto() - text = auto() - - -class ResponseModelStylesUser(HordeAPIData): - name: str - """The name of the style.""" - id_: str = Field(alias="id") - """The ID of the style.""" - type_: StyleType = Field(alias="type") - """The type of the style.""" diff --git a/horde_sdk/ai_horde_api/apimodels/alchemy/_async.py b/horde_sdk/ai_horde_api/apimodels/alchemy/async_.py similarity index 71% rename from horde_sdk/ai_horde_api/apimodels/alchemy/_async.py rename to horde_sdk/ai_horde_api/apimodels/alchemy/async_.py index eb98b75d..f95b650e 100644 --- a/horde_sdk/ai_horde_api/apimodels/alchemy/_async.py +++ b/horde_sdk/ai_horde_api/apimodels/alchemy/async_.py @@ -1,19 +1,19 @@ import base64 import urllib.parse +from typing import Any, override from loguru import logger from pydantic import field_validator -from typing_extensions import override -from horde_sdk.ai_horde_api.apimodels.alchemy._status import AlchemyDeleteRequest, AlchemyStatusRequest +from horde_sdk.ai_horde_api.apimodels.alchemy.status import AlchemyDeleteRequest, AlchemyStatusRequest from horde_sdk.ai_horde_api.apimodels.base import ( BaseAIHordeRequest, JobResponseMixin, ) -from horde_sdk.ai_horde_api.consts import KNOWN_ALCHEMY_TYPES from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH from horde_sdk.ai_horde_api.metadata import AIHordePathData from horde_sdk.consts import HTTPMethod, HTTPStatusCode +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES from horde_sdk.generic_api.apimodels import ( 
APIKeyAllowedInRequestMixin, ContainsMessageResponseMixin, @@ -22,6 +22,7 @@ HordeResponseTypes, ResponseRequiringFollowUpMixin, ) +from horde_sdk.generic_api.decoration import Unequatable, Unhashable class AlchemyAsyncResponse( @@ -30,7 +31,7 @@ class AlchemyAsyncResponse( ResponseRequiringFollowUpMixin, ContainsMessageResponseMixin, ): - """Represents the data returned from the `/v2/alchemy/async` endpoint. + """Represents the data returned from the /v2/interrogate/async endpoint with http status code 202. v2 API Model: `RequestInterrogationResponse` """ @@ -65,11 +66,21 @@ def get_follow_up_failure_cleanup_request_type(cls) -> type[AlchemyDeleteRequest class AlchemyAsyncRequestFormItem(HordeAPIData): + """Represents a single form of alchemy to request. + + See :class:`horde_sdk.generation_parameters.alchemy.consts.KNOWN_ALCHEMY_TYPES` for the list of + known alchemy types. + """ + name: KNOWN_ALCHEMY_TYPES | str """The name of the form to request.""" + payload: Any | None = None + """Not currently supported.""" + @field_validator("name") - def check_name(cls, v: KNOWN_ALCHEMY_TYPES | str) -> KNOWN_ALCHEMY_TYPES | str: + def validate_name(cls, v: KNOWN_ALCHEMY_TYPES | str) -> KNOWN_ALCHEMY_TYPES | str: + """Validate the name of the form to request.""" if isinstance(v, KNOWN_ALCHEMY_TYPES): return v if str(v) not in KNOWN_ALCHEMY_TYPES.__members__: @@ -77,10 +88,24 @@ def check_name(cls, v: KNOWN_ALCHEMY_TYPES | str) -> KNOWN_ALCHEMY_TYPES | str: return v +@Unhashable +@Unequatable class AlchemyAsyncRequest( BaseAIHordeRequest, APIKeyAllowedInRequestMixin, ): + """Data required to request an alchemy job. Alchemy includes upscaling, captioning, etc. + + Multiple forms of alchemy can be requested at once. + + See :class:`horde_sdk.generation_parameters.alchemy.consts.KNOWN_ALCHEMY_TYPES` for the list of + known alchemy types. + + Represents a POST request to the /v2/interrogate/async endpoint. + + v2 API Model: `ModelInterrogationInputStable` + """ + forms: list[AlchemyAsyncRequestFormItem] """The list of forms (types of post-processing/interrogation/captioning/etc) to request.""" source_image: str @@ -89,15 +114,20 @@ class AlchemyAsyncRequest( """Whether to use the slower workers. Costs additional kudos if `False`.""" extra_slow_workers: bool = False """Whether to use the super slow workers.""" + webhook: str | None = None + """Provide a URL where the AI Horde will send a POST call after each delivered generation. 
The request will + include the details of the job as well as the request ID.""" @field_validator("forms") - def check_at_least_one_form(cls, v: list[AlchemyAsyncRequestFormItem]) -> list[AlchemyAsyncRequestFormItem]: + def validate_at_least_one_form(cls, v: list[AlchemyAsyncRequestFormItem]) -> list[AlchemyAsyncRequestFormItem]: + """Ensure at least one form is provided.""" if not v: raise ValueError("At least one form must be provided.") return v @field_validator("source_image") - def check_source_image(cls, v: str) -> str: + def validate_source_image(cls, v: str) -> str: + """Validate the source image URL or base64 string.""" if "http" in v: parsed_url = urllib.parse.urlparse(v) if parsed_url.scheme not in ["http", "https"]: diff --git a/horde_sdk/ai_horde_api/apimodels/alchemy/_pop.py b/horde_sdk/ai_horde_api/apimodels/alchemy/pop.py similarity index 71% rename from horde_sdk/ai_horde_api/apimodels/alchemy/_pop.py rename to horde_sdk/ai_horde_api/apimodels/alchemy/pop.py index 28adacf1..e854c2bf 100644 --- a/horde_sdk/ai_horde_api/apimodels/alchemy/_pop.py +++ b/horde_sdk/ai_horde_api/apimodels/alchemy/pop.py @@ -1,23 +1,26 @@ from __future__ import annotations +from typing import override + from loguru import logger -from pydantic import Field, field_validator, model_validator -from typing_extensions import override +from pydantic import Field, field_validator -from horde_sdk.ai_horde_api.apimodels.alchemy._submit import AlchemyJobSubmitRequest +from horde_sdk.ai_horde_api.apimodels.alchemy.submit import AlchemyJobSubmitRequest from horde_sdk.ai_horde_api.apimodels.base import ( BaseAIHordeRequest, JobRequestMixin, ) -from horde_sdk.ai_horde_api.consts import KNOWN_ALCHEMY_TYPES from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH +from horde_sdk.ai_horde_api.fields import GenerationID from horde_sdk.consts import HTTPMethod +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES from horde_sdk.generic_api.apimodels import ( APIKeyAllowedInRequestMixin, HordeAPIObjectBaseModel, HordeResponseBaseModel, ResponseRequiringFollowUpMixin, ) +from horde_sdk.generic_api.decoration import Unequatable, Unhashable # FIXME @@ -47,7 +50,10 @@ def get_api_model_name(cls) -> str | None: class AlchemyPopFormPayload(HordeAPIObjectBaseModel, JobRequestMixin): - """v2 API Model: `InterrogationPopFormPayload`.""" + """Contains the data for a single alchemy generation form for workers (pop). + + v2 API Model: `InterrogationPopFormPayload` + """ @override @classmethod @@ -61,6 +67,7 @@ def get_api_model_name(cls) -> str | None: @field_validator("form", mode="before") def validate_form(cls, v: str | KNOWN_ALCHEMY_TYPES) -> KNOWN_ALCHEMY_TYPES | str: + """Ensure that the form is a known alchemy type.""" if isinstance(v, KNOWN_ALCHEMY_TYPES): return v if isinstance(v, str) and v not in KNOWN_ALCHEMY_TYPES.__members__: @@ -80,12 +87,15 @@ def validate_form(cls, v: str | KNOWN_ALCHEMY_TYPES) -> KNOWN_ALCHEMY_TYPES | st class NoValidAlchemyFound(HordeAPIObjectBaseModel): - """v2 API Model: `NoValidInterrogationsFoundStable`.""" + """The number of jobs this worker was not eligible for, and why. 
+ + v2 API Model: `NoValidInterrogationsFound` + """ @override @classmethod def get_api_model_name(cls) -> str | None: - return "NoValidInterrogationsFoundStable" + return "NoValidInterrogationsFound" bridge_version: int | None = Field( default=None, @@ -126,8 +136,15 @@ def __hash__(self) -> int: return hash((self.bridge_version, self.untrusted, self.worker_id)) -class AlchemyPopResponse(HordeResponseBaseModel, ResponseRequiringFollowUpMixin): - """v2 API Model: `InterrogationPopPayload`.""" +class AlchemyJobPopResponse(HordeResponseBaseModel, ResponseRequiringFollowUpMixin): + """Contains job data for workers, if any were available. Also contains data for jobs this worker was skipped for. + + This is the key response type for all alchemy workers as it contains all assignment data for the worker. + + Represents the data returned from the /v2/interrogate/pop endpoint with http status code 200. + + v2 API Model: `InterrogationPopPayload` + """ # and not actually specifying a schema forms: list[AlchemyPopFormPayload] | None = None @@ -167,14 +184,6 @@ def get_follow_up_returned_params(self, *, as_python_field_name: bool = False) - return all_ids - @model_validator(mode="after") - def coerce_list_order(self) -> AlchemyPopResponse: - if self.forms is not None: - logger.debug("Sorting forms by id") - self.forms.sort(key=lambda form: form.id_) - - return self - @override @classmethod def get_follow_up_request_types(cls) -> list[type[AlchemyJobSubmitRequest]]: # type: ignore[override] @@ -182,7 +191,7 @@ def get_follow_up_request_types(cls) -> list[type[AlchemyJobSubmitRequest]]: # return [AlchemyJobSubmitRequest] def __eq__(self, other: object) -> bool: - if not isinstance(other, AlchemyPopResponse): + if not isinstance(other, AlchemyJobPopResponse): return False forms_match = True @@ -200,21 +209,59 @@ def __hash__(self) -> int: if self.forms is None: return hash(self.skipped) - return hash((tuple([form.id_ for form in self.forms]), self.skipped)) + return hash((tuple(sorted([form.id_ for form in self.forms])), self.skipped)) + @property + def ids(self) -> list[GenerationID]: + """Return a list of all the ids in the response.""" + if self.forms is None: + return [] + return [form.id_ for form in self.forms] + + @property + def ids_present(self) -> bool: + """Return whether the response has any ids.""" + return bool(self.ids) + +@Unhashable +@Unequatable class AlchemyPopRequest(BaseAIHordeRequest, APIKeyAllowedInRequestMixin): - """Represents the data needed to make a request to the `/v2/interrogate/pop` endpoint. + """Request additional jobs, if any are available, for an alchemy worker. + + This is the key request type for all alchemy workers as it requests all available jobs for the worker. + + Represents a POST request to the /v2/interrogate/pop endpoint. v2 API Model: `InterrogationPopInput` """ + bridge_agent: str = Field( + default="unknown", + examples=["AI Horde Worker reGen:4.1.0:https://github.com/Haidra-Org/horde-worker-reGen"], + max_length=1000, + ) + """The agent string for the bridge, in the format `name:version:url or contact info`.""" + name: str """The name of the request. 
This is used to identify the request in the logs.""" priority_usernames: list[str] """The usernames that should be prioritized for this request.""" forms: list[KNOWN_ALCHEMY_TYPES] """The types of alchemy that should be generated.""" + amount: int + """The number of jobs to request.""" + threads: int = Field( + default=1, + ge=1, + ) + """The number of threads to report that this bridge is using to the API.""" + max_tiles: int = Field( + default=16, + ge=1, + le=256, + ) + """The maximum number of 512x512 tiles that this worker can process.""" @override @classmethod @@ -233,5 +280,5 @@ def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: @override @classmethod - def get_default_success_response_type(cls) -> type[AlchemyPopResponse]: - return AlchemyPopResponse + def get_default_success_response_type(cls) -> type[AlchemyJobPopResponse]: + return AlchemyJobPopResponse diff --git a/horde_sdk/ai_horde_api/apimodels/alchemy/_status.py b/horde_sdk/ai_horde_api/apimodels/alchemy/status.py similarity index 88% rename from horde_sdk/ai_horde_api/apimodels/alchemy/_status.py rename to horde_sdk/ai_horde_api/apimodels/alchemy/status.py index b927f3ef..8c1feae8 100644 --- a/horde_sdk/ai_horde_api/apimodels/alchemy/_status.py +++ b/horde_sdk/ai_horde_api/apimodels/alchemy/status.py @@ -1,11 +1,13 @@ +from typing import override + from loguru import logger from pydantic import field_validator -from typing_extensions import override from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest, JobRequestMixin -from horde_sdk.ai_horde_api.consts import GENERATION_STATE, KNOWN_ALCHEMY_TYPES, KNOWN_UPSCALERS +from horde_sdk.ai_horde_api.consts import GENERATION_STATE from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH from horde_sdk.consts import HTTPMethod +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES, KNOWN_UPSCALERS from horde_sdk.generic_api.apimodels import ( APIKeyAllowedInRequestMixin, HordeAPIData, @@ -86,6 +88,7 @@ class AlchemyFormStatus(HordeAPIData): @field_validator("form", mode="before") def validate_form(cls, v: str | KNOWN_ALCHEMY_TYPES) -> KNOWN_ALCHEMY_TYPES | str: + """Ensure that the form is a known alchemy type.""" if isinstance(v, KNOWN_ALCHEMY_TYPES): return v if str(v) not in KNOWN_ALCHEMY_TYPES.__members__: @@ -102,6 +105,7 @@ def validate_result( cls, v: dict[str, object], ) -> dict[str, object] | None: + """Ensure that the result is valid and convert it to the correct type, if possible.""" if "additionalProp1" in v: logger.debug("Found additionalProp1 in result, this is a dummy result. Ignoring.") return None @@ -116,9 +120,11 @@ def validate_result( class AlchemyStatusResponse(HordeResponseBaseModel, ResponseWithProgressMixin): - """The response from the `/v2/interrogate/status/{id}` endpoint. + """Contains the status of an alchemy job and any completed work (if any). - You will find the results of the alchemy here. 
+ Represents the data returned from the following endpoints and http status codes: + - /v2/interrogate/status/{id} | AlchemyStatusRequest [GET] -> 200 + - /v2/interrogate/status/{id} | AlchemyDeleteRequest [DELETE] -> 200 v2 API Model: `InterrogationStatus` """ @@ -150,6 +156,7 @@ def all_upscale_results(self) -> list[AlchemyUpscaleResult]: """Return all completed upscale results.""" return [form.result for form in self.forms if form.done and isinstance(form.result, AlchemyUpscaleResult)] + @override @classmethod def get_api_model_name(cls) -> str | None: return "InterrogationStatus" @@ -193,7 +200,10 @@ class AlchemyStatusRequest( JobRequestMixin, APIKeyAllowedInRequestMixin, ): - """Represents the data needed to make a request to the `/v2/interrogate/status/{id}` endpoint.""" + """Poll for the status of an interrogation job, and retrieve any completed work. + + Represents a GET request to the /v2/interrogate/status/{id} endpoint. + """ @override @classmethod @@ -220,7 +230,10 @@ class AlchemyDeleteRequest( BaseAIHordeRequest, JobRequestMixin, ): - """Represents the data needed to make a request to the `/v2/interrogate/status/{id}` endpoint.""" + """Cancel an in-progress interrogation job. + + Represents a DELETE request to the /v2/interrogate/status/{id} endpoint. + """ @override @classmethod diff --git a/horde_sdk/ai_horde_api/apimodels/alchemy/_submit.py b/horde_sdk/ai_horde_api/apimodels/alchemy/submit.py similarity index 73% rename from horde_sdk/ai_horde_api/apimodels/alchemy/_submit.py rename to horde_sdk/ai_horde_api/apimodels/alchemy/submit.py index bd4ba0d4..de0f6100 100644 --- a/horde_sdk/ai_horde_api/apimodels/alchemy/_submit.py +++ b/horde_sdk/ai_horde_api/apimodels/alchemy/submit.py @@ -1,4 +1,4 @@ -from typing_extensions import override +from typing import override from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest, JobRequestMixin from horde_sdk.ai_horde_api.consts import GENERATION_STATE @@ -8,6 +8,14 @@ class AlchemyJobSubmitResponse(HordeResponseBaseModel): + """Indicates that an alchemy job has been submitted successfully and the kudos gained. + + Represents the data returned from the following endpoints and http status codes: + - /v2/interrogate/submit | AlchemyJobSubmitRequest [POST] -> 200 + + v2 API Model: `GenerationSubmitted` + """ + @override @classmethod def get_api_model_name(cls) -> str | None: @@ -18,6 +26,13 @@ def get_api_model_name(cls) -> str | None: class AlchemyJobSubmitRequest(BaseAIHordeRequest, JobRequestMixin, APIKeyAllowedInRequestMixin): + """Request to submit an alchemy job once a worker has completed it. + + Represents a POST request to the /v2/interrogate/submit endpoint. 
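The status and delete request types documented above form the polling side of the alchemy workflow. A hedged sketch of constructing them; the job ID is a placeholder, `apikey` is the assumed field name from `APIKeyAllowedInRequestMixin`, and dispatching the requests through a client is outside this hunk:

```python
from horde_sdk.ai_horde_api.apimodels.alchemy.status import (
    AlchemyDeleteRequest,
    AlchemyStatusRequest,
)

job_id = "00000000-0000-0000-0000-000000000000"  # placeholder GenerationID

# GET /v2/interrogate/status/{id} -- poll for progress and completed forms.
status_request = AlchemyStatusRequest(id=job_id, apikey="0000000000")

# DELETE /v2/interrogate/status/{id} -- cancel the job instead.
cancel_request = AlchemyDeleteRequest(id=job_id)
```

Once the job is done, the `AlchemyStatusResponse` above offers `all_upscale_results` as a convenience for pulling out completed upscales.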
+ + v2 API Model: `SubmitInputStable` + """ + result: str # FIXME """The result of the alchemy job.""" state: GENERATION_STATE diff --git a/horde_sdk/ai_horde_api/apimodels/base.py b/horde_sdk/ai_horde_api/apimodels/base.py index 18997a17..5e9a472c 100644 --- a/horde_sdk/ai_horde_api/apimodels/base.py +++ b/horde_sdk/ai_horde_api/apimodels/base.py @@ -5,27 +5,31 @@ import os import random import uuid -from typing import Any +from typing import Any, override from loguru import logger from pydantic import ConfigDict, Field, field_validator, model_validator -from typing_extensions import override from horde_sdk.ai_horde_api.consts import ( - KNOWN_CONTROLNETS, - KNOWN_FACEFIXERS, - KNOWN_MISC_POST_PROCESSORS, - KNOWN_SAMPLERS, - KNOWN_UPSCALERS, - KNOWN_WORKFLOWS, METADATA_TYPE, METADATA_VALUE, + MODEL_TYPE, POST_PROCESSOR_ORDER_TYPE, WarningCode, - _all_valid_post_processors_names_and_values, ) from horde_sdk.ai_horde_api.endpoints import AI_HORDE_BASE_URL -from horde_sdk.ai_horde_api.fields import JobID, WorkerID +from horde_sdk.ai_horde_api.fields import GenerationID, SharedKeyID, WorkerID +from horde_sdk.generation_parameters.alchemy.consts import ( + KNOWN_FACEFIXERS, + KNOWN_MISC_POST_PROCESSORS, + KNOWN_UPSCALERS, + _all_valid_post_processors_names_and_values, +) +from horde_sdk.generation_parameters.image.consts import ( + KNOWN_IMAGE_CONTROLNETS, + KNOWN_IMAGE_SAMPLERS, + KNOWN_IMAGE_WORKFLOWS, +) from horde_sdk.generic_api.apimodels import ( HordeAPIData, HordeAPIObjectBaseModel, @@ -46,15 +50,15 @@ def get_api_url(cls) -> str: class JobRequestMixin(HordeAPIData): """Mix-in class for data relating to any generation jobs.""" - id_: JobID = Field(alias="id") + id_: GenerationID = Field(alias="id") """The UUID for this job. Use this to post the results in the future.""" @field_validator("id_", mode="before") - def validate_id(cls, v: str | JobID) -> JobID | str: + def validate_id(cls, v: str | GenerationID) -> GenerationID | str: """Ensure that the job ID is not empty.""" if isinstance(v, str) and v == "": logger.warning("Job ID is empty") - return JobID(root=uuid.uuid4()) + return GenerationID(root=uuid.uuid4()) return v @@ -70,15 +74,15 @@ def __hash__(self) -> int: class JobResponseMixin(HordeAPIData): """Mix-in class for data relating to any generation jobs.""" - id_: JobID = Field(alias="id") + id_: GenerationID = Field(alias="id") """The UUID for this job.""" @field_validator("id_", mode="before") - def validate_id(cls, v: str | JobID) -> JobID | str: + def validate_id(cls, v: str | GenerationID) -> GenerationID | str: """Ensure that the job ID is not empty.""" if isinstance(v, str) and v == "": logger.warning("Job ID is empty") - return JobID(root=uuid.uuid4()) + return GenerationID(root=uuid.uuid4()) return v @@ -227,33 +231,25 @@ def get_api_model_name(cls) -> str | None: return "RequestSingleWarning" -class ImageGenerateParamMixin(HordeAPIObjectBaseModel): - """Mix-in class of some of the data included in a request to the `/v2/generate/async` endpoint. +class _BaseImageGenerateParamMixin(HordeAPIObjectBaseModel): + """Base class for all shared image generation parameters.""" - Also is the corresponding information returned on a job pop to the `/v2/generate/pop` endpoint. 
- v2 API Model: `ModelPayloadRootStable` - """ - - model_config = ( - ConfigDict(frozen=True, extra="allow") - if not os.getenv("TESTS_ONGOING") - else ConfigDict(frozen=True, extra="forbid") - ) + height: int = Field(default=512, ge=64, le=3072, multiple_of=64) + """The desired output image height.""" + width: int = Field(default=512, ge=64, le=3072, multiple_of=64) + """The desired output image width.""" - sampler_name: KNOWN_SAMPLERS | str = KNOWN_SAMPLERS.k_lms - """The sampler to use for this generation. Defaults to `KNOWN_SAMPLERS.k_lms`.""" - cfg_scale: float = 7.5 + sampler_name: KNOWN_IMAGE_SAMPLERS | str = KNOWN_IMAGE_SAMPLERS.k_euler + """The sampler to use for this generation. Defaults to `KNOWN_IMAGE_SAMPLERS.k_lms`.""" + karras: bool = True + """Set to True if you want to use the Karras scheduling.""" + cfg_scale: float = Field(default=7.5, ge=0, le=10) """The cfg_scale to use for this generation. Defaults to 7.5.""" denoising_strength: float | None = Field(default=1, ge=0, le=1) """The denoising strength to use for this generation. Defaults to 1.""" - seed: str | None = None - """The seed to use for this generation. If not provided, a random seed will be used.""" - height: int = Field(default=512, ge=64, le=3072) - """The desired output image height.""" - width: int = Field(default=512, ge=64, le=3072) - """The desired output image width.""" - seed_variation: int | None = Field(default=None, ge=1, le=1000) - """Deprecated.""" + clip_skip: int = Field(default=1, ge=1, le=12) + """The number of clip layers to skip.""" + post_processing: list[str | KNOWN_UPSCALERS | KNOWN_FACEFIXERS | KNOWN_MISC_POST_PROCESSORS] = Field( default_factory=list, ) @@ -261,65 +257,99 @@ class ImageGenerateParamMixin(HordeAPIObjectBaseModel): post_processing_order: POST_PROCESSOR_ORDER_TYPE = POST_PROCESSOR_ORDER_TYPE.facefixers_first """The order in which to apply post-processing models. 
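The `_BaseImageGenerateParamMixin` above replaces the old divisible-by-64 validator with `Field(multiple_of=64)` on `width` and `height`. A minimal pydantic sketch (a stand-in model, not the SDK class) showing how that declarative constraint behaves:

```python
from pydantic import BaseModel, Field, ValidationError


class DemoParams(BaseModel):
    # Same constraint style as the mixin above: 64..3072, in steps of 64.
    width: int = Field(default=512, ge=64, le=3072, multiple_of=64)
    height: int = Field(default=512, ge=64, le=3072, multiple_of=64)


print(DemoParams(width=1024, height=768))  # accepted

try:
    DemoParams(width=1000)  # rejected: not a multiple of 64
except ValidationError as err:
    print(err.errors()[0]["type"])  # "multiple_of"
```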
Applying upscalers or removing backgrounds before facefixers costs less kudos.""" - karras: bool = True - """Set to True if you want to use the Karras scheduling.""" - tiling: bool = False - """Set to True if you want to use seamless tiling.""" + facefixer_strength: float | None = Field(default=None, ge=0, le=1) + """The strength of the facefixer model.""" + hires_fix: bool = False """Set to True if you want to use the hires fix.""" hires_fix_denoising_strength: float | None = Field(default=None, ge=0, le=1) """The strength of the denoising for the hires fix second pass.""" - clip_skip: int = Field(default=1, ge=1, le=12) - """The number of clip layers to skip.""" - control_type: str | KNOWN_CONTROLNETS | None = None - """The type of control net type to use.""" - image_is_control: bool | None = None - """Set to True if the image is a control image.""" - return_control_map: bool | None = None - """Set to True if you want the ControlNet map returned instead of a generated image.""" - facefixer_strength: float | None = Field(default=None, ge=0, le=1) - """The strength of the facefixer model.""" + loras: list[LorasPayloadEntry] | None = None """A list of lora parameters to use.""" tis: list[TIPayloadEntry] | None = None """A list of textual inversion (embedding) parameters to use.""" - extra_texts: list[ExtraTextEntry] | None = None - """A list of extra texts and prompts to use in the comfyUI workflow.""" - workflow: str | KNOWN_WORKFLOWS | None = None + + workflow: str | KNOWN_IMAGE_WORKFLOWS | None = None """The specific comfyUI workflow to use.""" transparent: bool | None = None """When true, will generate an image with a transparent background""" + tiling: bool = False + """Set to True if you want to use seamless tiling.""" + special: dict[Any, Any] | None = None """Reserved for future use.""" - use_nsfw_censor: bool = False - """If the request is SFW, and the worker accidentally generates NSFW, it will send back a censored image.""" - @field_validator("width", "height", mode="before") - def width_divisible_by_64(cls, value: int) -> int: - """Ensure that the width is divisible by 64.""" - if value % 64 != 0: - raise ValueError("width must be divisible by 64") - return value + @field_validator("post_processing") + def post_processors_must_be_known( + cls, + v: list[str | KNOWN_UPSCALERS | KNOWN_FACEFIXERS | KNOWN_MISC_POST_PROCESSORS], + ) -> list[str | KNOWN_UPSCALERS | KNOWN_FACEFIXERS | KNOWN_MISC_POST_PROCESSORS]: + """Ensure that the post processors are in this list of supported post processors.""" + _valid_types: list[type] = [str, KNOWN_UPSCALERS, KNOWN_FACEFIXERS, KNOWN_MISC_POST_PROCESSORS] + for post_processor in v: + if post_processor not in _all_valid_post_processors_names_and_values or ( + type(post_processor) not in _valid_types + ): + logger.warning( + f"Unknown post processor {post_processor}. Is your SDK out of date or did the API change?", + ) + return v @field_validator("sampler_name") - def sampler_name_must_be_known(cls, v: str | KNOWN_SAMPLERS) -> str | KNOWN_SAMPLERS: + def sampler_name_must_be_known(cls, v: str | KNOWN_IMAGE_SAMPLERS) -> str | KNOWN_IMAGE_SAMPLERS: """Ensure that the sampler name is in this list of supported samplers.""" - if isinstance(v, KNOWN_SAMPLERS): + if isinstance(v, KNOWN_IMAGE_SAMPLERS): return v try: - KNOWN_SAMPLERS(v) + KNOWN_IMAGE_SAMPLERS(v) except ValueError: logger.warning(f"Unknown sampler name {v}. 
Is your SDK out of date or did the API change?") return v + +class ImageGenerateParamMixin(_BaseImageGenerateParamMixin): + """Contains basic and api-specific parameters for image generation. + + v2 API Model: `ModelPayloadRootStable` + """ + + model_config = ( + ConfigDict(frozen=True, extra="allow") + if not os.getenv("TESTS_ONGOING") + else ConfigDict(frozen=True, extra="forbid") + ) + + seed: str | None = None + """The seed to use for this generation. If not provided, a random seed will be used.""" + seed_variation: int | None = Field(default=None, ge=1, le=1000) + """Deprecated.""" + + control_type: str | KNOWN_IMAGE_CONTROLNETS | None = None + """The type of control net type to use.""" + image_is_control: bool | None = None + """Set to True if the image is a control image.""" + return_control_map: bool | None = None + """Set to True if you want the ControlNet map returned instead of a generated image.""" + + extra_texts: list[ExtraTextEntry] | None = None + """A list of extra texts and prompts to use in the comfyUI workflow.""" + use_nsfw_censor: bool = False + """If the request is SFW, and the worker accidentally generates NSFW, it will send back a censored image.""" + # @model_validator(mode="after") # def validate_hires_fix(self) -> ImageGenerateParamMixin: # if self.hires_fix and (self.width < 512 or self.height < 512): # raise ValueError("hires_fix is only valid when width and height are both >= 512") # return self + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "ModelPayloadRootStable" + @field_validator("seed") def random_seed_if_none(cls, v: str | None) -> str | None: """If the seed is None, generate a random seed.""" @@ -330,45 +360,31 @@ def random_seed_if_none(cls, v: str | None) -> str | None: return v - @field_validator("post_processing") - def post_processors_must_be_known( - cls, - v: list[str | KNOWN_UPSCALERS | KNOWN_FACEFIXERS | KNOWN_MISC_POST_PROCESSORS], - ) -> list[str | KNOWN_UPSCALERS | KNOWN_FACEFIXERS | KNOWN_MISC_POST_PROCESSORS]: - """Ensure that the post processors are in this list of supported post processors.""" - _valid_types: list[type] = [str, KNOWN_UPSCALERS, KNOWN_FACEFIXERS, KNOWN_MISC_POST_PROCESSORS] - for post_processor in v: - if post_processor not in _all_valid_post_processors_names_and_values or ( - type(post_processor) not in _valid_types - ): - logger.warning( - f"Unknown post processor {post_processor}. Is your SDK out of date or did the API change?", - ) - return v - @field_validator("control_type") - def control_type_must_be_known(cls, v: str | KNOWN_CONTROLNETS | None) -> str | KNOWN_CONTROLNETS | None: + def control_type_must_be_known( + cls, + v: str | KNOWN_IMAGE_CONTROLNETS | None, + ) -> str | KNOWN_IMAGE_CONTROLNETS | None: """Ensure that the control type is in this list of supported control types.""" if v is None: return None - if isinstance(v, KNOWN_CONTROLNETS): + if isinstance(v, KNOWN_IMAGE_CONTROLNETS): return v try: - KNOWN_CONTROLNETS(v) + KNOWN_IMAGE_CONTROLNETS(v) except ValueError: logger.warning(f"Unknown control type {v}. Is your SDK out of date or did the API change?") return v - @override - @classmethod - def get_api_model_name(cls) -> str | None: - return "ModelPayloadRootStable" - class JobSubmitResponse(HordeResponseBaseModel): - """The response to a job submission request, indicating the number of kudos gained. + """Indicates that a generation job was successfully submitted and the amount of kudos gained. 
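The sampler and control-type validators above share a deliberate pattern: unknown values produce a logged warning rather than a hard failure, so older SDKs keep working when the API adds new options. A standalone sketch of that pattern with a stand-in enum (the member names are illustrative, not SDK values):

```python
import warnings
from enum import Enum


class KnownSampler(str, Enum):
    # Stand-in members for illustration only.
    k_euler = "k_euler"
    k_lms = "k_lms"


def coerce_sampler(value: str) -> KnownSampler | str:
    # Return the enum member when recognised; otherwise warn and pass the raw
    # string through, mirroring sampler_name_must_be_known above.
    try:
        return KnownSampler(value)
    except ValueError:
        warnings.warn(f"Unknown sampler name {value}; the SDK may be out of date.")
        return value


print(coerce_sampler("k_euler"))    # KnownSampler.k_euler
print(coerce_sampler("brand_new"))  # warns, then returns "brand_new"
```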
+ + Represents the data returned from the following endpoints and http status codes: + - /v2/generate/text/submit | TextGenerationJobSubmitRequest [POST] -> 200 + - /v2/generate/submit | ImageGenerationJobSubmitRequest [POST] -> 200 v2 API Model: `GenerationSubmitted` """ @@ -417,3 +433,94 @@ def validate_value(cls, v: str | METADATA_VALUE) -> str | METADATA_VALUE: @classmethod def get_api_model_name(cls) -> str | None: return "GenerationMetadataStable" + + +class MessageSpecifiesSharedKeyMixin(HordeAPIData): + """Mix-in class to describe an endpoint for which you can specify a shared key.""" + + sharedkey_id: SharedKeyID = Field(alias="id") + """The shared key ID to use for this request.""" + + @field_validator("sharedkey_id", mode="before") + def validate_sharedkey_id(cls, v: str | SharedKeyID) -> SharedKeyID | str: + """Ensure that the shared key ID is not empty.""" + if isinstance(v, str) and v == "": + logger.warning("Shared key ID is empty") + + return v + + +class ActiveModelLite(HordeAPIObjectBaseModel): + """Represents a single active model. + + v2 API Model: `ActiveModelLite` + """ + + count: int | None = Field( + default=None, + ) + """How many of workers in this horde are running this model.""" + name: str | None = Field( + default=None, + ) + """The Name of a model available by workers in this horde.""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "ActiveModelLite" + + +class ActiveModel(ActiveModelLite): + """Represents a single active model. + + v2 API Model: `ActiveModel` + """ + + eta: int | None = Field( + default=None, + ) + """Estimated time in seconds for this model's queue to be cleared.""" + jobs: float | None = Field( + default=None, + ) + """The job count waiting to be generated by this model.""" + performance: float | None = Field( + default=None, + ) + """The average speed of generation for this model.""" + queued: float | None = Field( + default=None, + ) + """The amount waiting to be generated by this model.""" + type_: MODEL_TYPE | None = Field( + examples=[MODEL_TYPE.image, MODEL_TYPE.text], + alias="type", + ) + """The model type (text or image).""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "ActiveModel" + + +__all__ = [ + "ActiveModel", + "ActiveModelLite", + "BaseAIHordeRequest", + "ExtraSourceImageEntry", + "ExtraTextEntry", + "GenMetadataEntry", + "ImageGenerateParamMixin", + "JobRequestMixin", + "JobResponseMixin", + "JobSubmitResponse", + "LorasPayloadEntry", + "MessageSpecifiesSharedKeyMixin", + "SingleWarningEntry", + "TIPayloadEntry", + "WorkerRequestMixin", + "WorkerRequestNameMixin", + "_BaseImageGenerateParamMixin", +] diff --git a/horde_sdk/ai_horde_api/apimodels/collections.py b/horde_sdk/ai_horde_api/apimodels/collections.py new file mode 100644 index 00000000..4d2e9b11 --- /dev/null +++ b/horde_sdk/ai_horde_api/apimodels/collections.py @@ -0,0 +1,425 @@ +from typing import Literal, override + +from pydantic import Field + +from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest +from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH +from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod +from horde_sdk.generic_api.apimodels import ( + APIKeyAllowedInRequestMixin, + ContainsMessageResponseMixin, + ContainsWarningsResponseMixin, + HordeAPIData, + HordeAPIObjectBaseModel, + HordeResponseBaseModel, + HordeResponseRootModel, +) +from horde_sdk.generic_api.decoration import Unequatable, Unhashable + + +class 
ResponseModelStylesShort(HordeAPIObjectBaseModel): + """The name and ID of a style. + + v2 API Model: `ResponseModelStylesShort` + """ + + name: str = Field( + description="The unique name for this style", + ) + id_: str = Field( + alias="id", + description="The ID of this style", + ) + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "ResponseModelStylesShort" + + +@Unhashable +@Unequatable +class ResponseModelCollection(HordeResponseBaseModel): + """A collection of styles. + + Represents the data returned from the following endpoints and http status codes: + - /v2/collection_by_name/{collection_name} | CollectionByNameRequest [GET] -> 200 + - /v2/collections/{collection_id} | CollectionByIDRequest [GET] -> 200 + + v2 API Model: `ResponseModelCollection` + """ + + id: str + """The UUID of the collection. Use this to use this collection of retrieve its information in the future.""" + + name: str = Field() + """The name for the collection. Case-sensitive and unique per user.""" + + type: Literal["image"] = Field() + """The kind of styles stored in this collection.""" + + info: str | None = Field(default=None) + """Extra information about this collection.""" + + public: bool = Field(default=True) + """When true this collection will be listed among all collection publicly.When false, information about this + collection can only be seen by people who know its ID or name.""" + + styles: list[ResponseModelStylesShort] = Field() + """The styles contained in this collection.""" + + use_count: int | None = Field() + """The number of times this collection has been used.""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "ResponseModelCollection" + + +@Unhashable +@Unequatable +class AllCollectionsResponse(HordeResponseRootModel[list[ResponseModelCollection]]): + """A list of collections. + + Represents the data returned from the /v2/collections endpoint with http status code 200. + + v2 API Model: `_ANONYMOUS_MODEL` + """ + + root: list[ResponseModelCollection] + """The underlying list of collections.""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return _ANONYMOUS_MODEL + + +class AllCollectionsRequest(BaseAIHordeRequest): + """Request to get all collections, optionally filtered by type and sorted by popularity or age. + + Data is paginated. Each page has 25 collections. Set `page` to get the next page. + + Represents a GET request to the /v2/collections endpoint. + """ + + sort: Literal["popular", "age"] = Field( + default="popular", + description="The sort order for the collections.", + ) + + page: int = Field( + default=1, + description="The page number for the collections. 
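`AllCollectionsRequest` above is a paginated listing request whose `sort`, `page`, and `type` fields map straight onto query parameters. A hedged sketch of constructing it; sending it through a client is outside this hunk:

```python
from horde_sdk.ai_horde_api.apimodels.collections import AllCollectionsRequest

# GET /v2/collections -- page 2 of image collections, most popular first.
request = AllCollectionsRequest(sort="popular", page=2, type="image")

# "type" is the alias of the type_ field, and the allowed values are the
# Literal["image", "text", "all"] choices declared above.
```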
Each page has 25 styles.", + ) + + type_: Literal["image", "text", "all"] = Field( + alias="type", + default="all", + description="The type of collections to retrieve.", + ) + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_query_fields(cls) -> list[str]: + return ["sort", "page", "type"] + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_collections + + @override + @classmethod + def get_default_success_response_type(cls) -> type[AllCollectionsResponse]: + return AllCollectionsResponse + + +class CollectionByIDRequest(BaseAIHordeRequest): + """Request to get a collection by its ID. + + Represents a GET request to the /v2/collections/{collection_id} endpoint. + """ + + collection_id: str = Field( + description="The ID of the collection.", + ) + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_collections_by_id + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ResponseModelCollection]: + return ResponseModelCollection + + +class CollectionByNameRequest(BaseAIHordeRequest): + """Request to get a collection by its name. + + Represents a GET request to the /v2/collection_by_name/{collection_name} endpoint. + """ + + collection_name: str = Field( + description="The name of the collection.", + ) + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_collections_by_name + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ResponseModelCollection]: + return ResponseModelCollection + + +class CreateCollectionResponse( + HordeResponseBaseModel, + ContainsMessageResponseMixin, + ContainsWarningsResponseMixin, +): + """Indicates that a collection was created and provides its ID and any warnings. + + Represents the data returned from the /v2/collections endpoint with http status code 200. + + v2 API Model: `StyleModify` + """ + + id_: str = Field( + alias="id", + description="The ID of the collection.", + ) + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "StyleModify" + + +class _InputModelCollectionMixin(HordeAPIData): + """Mixin class containing the fields for creating or updating a collection.""" + + name: str = Field( + min_length=1, + max_length=100, + ) + """The name for the collection. 
Case-sensitive and unique per user.""" + + info: str | None = Field( + default=None, + min_length=1, + max_length=1000, + ) + """Extra information about this collection.""" + + public: bool = Field( + default=True, + ) + """When true this collection will be listed among all collections publicly.When false, information about this + collection can only be seen by people who know its ID or name.""" + + styles: list[str] = Field( + min_length=1, + ) + """The styles to use in this collection.""" + + +@Unhashable +@Unequatable +class CreateCollectionRequest( + _InputModelCollectionMixin, + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request to create a collection. + + Represents a POST request to the /v2/collections endpoint. + + v2 API Model: `InputModelCollection` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "InputModelCollection" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.POST + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_collections + + @override + @classmethod + def get_default_success_response_type(cls) -> type[CreateCollectionResponse]: + return CreateCollectionResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class DeleteCollectionResponse( + HordeResponseBaseModel, + ContainsMessageResponseMixin, +): + """Returns an "OK" message when a collection is successfully deleted with http status code 200. + + Represents the data returned from the /v2/collections/{collection_id} endpoint with http status code 200. + + v2 API Model: `SimpleResponse` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "SimpleResponse" + + +class DeleteCollectionRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request to delete a collection by its ID. + + Represents a DELETE request to the /v2/collections/{collection_id} endpoint. + """ + + collection_id: str = Field( + description="The ID of the collection.", + ) + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.DELETE + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_collections_by_id + + @override + @classmethod + def get_default_success_response_type(cls) -> type[DeleteCollectionResponse]: + return DeleteCollectionResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class UpdateCollectionResponse( + HordeResponseBaseModel, + ContainsMessageResponseMixin, + ContainsWarningsResponseMixin, +): + """Returns an "OK" message when a collection is successfully updated with http status code 200. + + Note: If issues are detected, they will be returned in the warnings field. + + Represents the data returned from the /v2/collections/{collection_id} endpoint with http status code 200. + + v2 API Model: `StyleModify` + """ + + id_: str = Field( + alias="id", + ) + """The ID of the collection that was updated.""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "StyleModify" + + +@Unhashable +@Unequatable +class UpdateCollectionRequest( + _InputModelCollectionMixin, + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request to update a collection by its ID. 
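`CreateCollectionRequest` above combines `_InputModelCollectionMixin` with the API-key mixin to create a new collection. A hedged sketch of building the request; the `apikey` field name and the style ID are assumptions, and the corresponding `CreateCollectionResponse` returns the new collection's ID plus any warnings:

```python
from horde_sdk.ai_horde_api.apimodels.collections import CreateCollectionRequest

# POST /v2/collections
request = CreateCollectionRequest(
    apikey="your-api-key",                    # assumed field name
    name="My favourite portrait styles",
    info="Styles I reuse for portrait generations.",
    public=True,
    styles=["placeholder-style-id"],          # at least one style is required
)
```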
+ + Note: Always check the warnings field on response for any issues that may have occurred during the update. + + Represents a PATCH request to the /v2/collections/{collection_id} endpoint. + + v2 API Model: `InputModelCollection` + """ + + collection_id: str + """The ID of the collection to update.""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "InputModelCollection" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.PATCH + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_collections_by_id + + @override + @classmethod + def get_default_success_response_type(cls) -> type[UpdateCollectionResponse]: + return UpdateCollectionResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True diff --git a/horde_sdk/ai_horde_api/apimodels/_documents.py b/horde_sdk/ai_horde_api/apimodels/documents.py similarity index 79% rename from horde_sdk/ai_horde_api/apimodels/_documents.py rename to horde_sdk/ai_horde_api/apimodels/documents.py index 76f57e91..4686c64c 100644 --- a/horde_sdk/ai_horde_api/apimodels/_documents.py +++ b/horde_sdk/ai_horde_api/apimodels/documents.py @@ -1,7 +1,8 @@ +from typing import override + from loguru import logger from pydantic import field_validator from strenum import StrEnum -from typing_extensions import override from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH @@ -13,11 +14,21 @@ class DocumentFormat(StrEnum): + """The supported formats for a document.""" + html = "html" markdown = "markdown" class HordeDocument(HordeResponseBaseModel): + """Represents the data returned from the following endpoints and http status codes: + - /v2/documents/terms | AIHordeGetTermsRequest [GET] -> 200 + - /v2/documents/sponsors | AIHordeGetSponsorsRequest [GET] -> 200 + - /v2/documents/privacy | AIHordeGetPrivacyPolicyRequest [GET] -> 200 + + v2 API Model: `HordeDocument` + """ # noqa: D205, D415 + html: str | None = None """The HTML content of the document, if requested.""" markdown: str | None = None @@ -30,12 +41,15 @@ def get_api_model_name(cls) -> str: class AIHordeDocumentRequestMixin(HordeAPIObjectBaseModel): + """Mixin for requests that retrieve a document from the AI Horde API.""" + format: DocumentFormat | str = DocumentFormat.html """The format of the document to return. 
Default is markdown.""" @field_validator("format") def validate_format(cls, value: DocumentFormat | str) -> DocumentFormat | str: + """Validate the format is a valid document format.""" if isinstance(value, DocumentFormat): return value @@ -48,6 +62,8 @@ def validate_format(cls, value: DocumentFormat | str) -> DocumentFormat | str: class AIHordeGetPrivacyPolicyRequest(BaseAIHordeRequest, AIHordeDocumentRequestMixin): + """Represents a GET request to the /v2/documents/privacy endpoint.""" + @override @classmethod def get_api_model_name(cls) -> str | None: @@ -70,6 +86,8 @@ def get_default_success_response_type(cls) -> type[HordeDocument]: class AIHordeGetSponsorsRequest(BaseAIHordeRequest, AIHordeDocumentRequestMixin): + """Represents a GET request to the /v2/documents/sponsors endpoint.""" + @override @classmethod def get_api_model_name(cls) -> str | None: @@ -92,6 +110,8 @@ def get_default_success_response_type(cls) -> type[HordeDocument]: class AIHordeGetTermsRequest(BaseAIHordeRequest, AIHordeDocumentRequestMixin): + """Represents a GET request to the /v2/documents/terms endpoint.""" + @override @classmethod def get_api_model_name(cls) -> str | None: diff --git a/horde_sdk/ai_horde_api/apimodels/filters.py b/horde_sdk/ai_horde_api/apimodels/filters.py new file mode 100644 index 00000000..68bd2a3d --- /dev/null +++ b/horde_sdk/ai_horde_api/apimodels/filters.py @@ -0,0 +1,467 @@ +from typing import Any, override + +from loguru import logger +from pydantic import Field, model_validator + +from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest +from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH +from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod, HTTPStatusCode +from horde_sdk.generic_api.apimodels import ( + APIKeyAllowedInRequestMixin, + ContainsMessageResponseMixin, + HordeAPIObjectBaseModel, + HordeResponseBaseModel, + HordeResponseRootModel, + HordeResponseTypes, +) +from horde_sdk.generic_api.decoration import Unequatable, Unhashable + + +class FilterDetails(HordeResponseBaseModel): + """Details about a filter. + + Represents the data returned from the following endpoints and http status codes: + - /v2/filters/{filter_id} | SingleFilterRequest [GET] -> 200 + - /v2/filters | PutNewFilterRequest [PUT] -> 201 + - /v2/filters/{filter_id} | PatchExistingFilter [PATCH] -> 200 + + v2 API Model: `FilterDetails` + """ + + description: str | None = Field(default=None, description="Description about this regex.") + """The description of this filter.""" + filter_type: int = Field(examples=[10], ge=10, le=29) + """The type of this filter.""" + id_: str = Field(alias="id") + """The UUID of this filter.""" + regex: str = Field(examples=["ac.*"]) + """The regex for this filter.""" + replacement: str | None = Field(default="") + """The replacement string for this regex.""" + user: str + """The moderator which added or last updated this regex.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "FilterDetails" + + +@Unhashable +@Unequatable +class FiltersListResponse(HordeResponseRootModel[list[FilterDetails]]): + """A list of filters. + + Represents the data returned from the /v2/filters endpoint with http status code 200. 
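The three document requests above differ only in endpoint; all take the shared `format` field from `AIHordeDocumentRequestMixin`. A hedged sketch of asking for the terms of service rendered as markdown:

```python
from horde_sdk.ai_horde_api.apimodels.documents import (
    AIHordeGetTermsRequest,
    DocumentFormat,
)

# GET /v2/documents/terms, rendered as markdown.
terms_request = AIHordeGetTermsRequest(format=DocumentFormat.markdown)

# The HordeDocument response defined above would then populate `markdown`
# and leave `html` as None.
```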
+ + v2 API Model: `FiltersList` + """ + + root: list[FilterDetails] + """The underlying list of filters.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "FiltersList" + + +class FiltersListRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request to get a list of filters, with optional filtering. + + Represents a GET request to the /v2/filters endpoint. + """ + + filter_type: int | None = Field( + default=None, + description="The type of filter to return. If not specified, all filters are returned.", + examples=[10], + ge=10, + le=29, + ) + + contains: str | None = Field( + default=None, + description="A string to search for in the filter description.", + examples=["cat"], + ) + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_filters + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_default_success_response_type(cls) -> type[FiltersListResponse]: + return FiltersListResponse + + @override + @classmethod + def get_query_fields(cls) -> list[str]: + return ["filter_type", "contains"] + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +@Unhashable +@Unequatable +class FilterPromptSuspicionResponse(HordeResponseBaseModel): + """The degree of suspicion for a prompt and the sections of the prompt that matched the filter. + + Represents the data returned from the /v2/filters endpoint with http status code 200. + + v2 API Model: `FilterPromptSuspicion` + """ + + matches: list[str] | None = None + """The sections of the prompt that matched the filter.""" + + suspicion: int + """Rates how suspicious the provided prompt is. A suspicion over 2 means it would be blocked.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "FilterPromptSuspicion" + + +class FilterPromptSuspicionRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request to check a prompt for suspicion and return the sections that matched the filter. + + Represents a POST request to the /v2/filters endpoint. + + v2 API Model: `_ANONYMOUS_MODEL` + """ + + prompt: str = Field(description="The prompt to check for suspicion.", examples=["cat"]) + filter_type: int = Field( + description="The type of filter to use. If not specified, all filters are used.", + default=0, + examples=[10], + ge=10, + le=29, + ) + + @override + @classmethod + def get_api_model_name(cls) -> str: + return _ANONYMOUS_MODEL + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_filters + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.POST + + @override + @classmethod + def get_default_success_response_type(cls) -> type[FilterPromptSuspicionResponse]: + return FilterPromptSuspicionResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + @model_validator(mode="before") + def validate_filter_type(cls, v: dict[Any, Any]) -> dict[Any, Any]: + """Ensure filter_type is not 0.""" + if v["filter_type"] == 0: + logger.warning("Filter type 0 is not allowed. Defaulting to 10.") + v["filter_type"] = 10 + return v + + +class PutNewFilterRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Create a new filter. 
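`FilterPromptSuspicionRequest` above posts a prompt to the filter system and gets back a suspicion score plus the matching sections. A hedged sketch; `apikey` is the assumed field name, and the endpoint requires a key (`is_api_key_required` is overridden to return True above):

```python
from horde_sdk.ai_horde_api.apimodels.filters import FilterPromptSuspicionRequest

# POST /v2/filters -- ask how suspicious a prompt looks.
request = FilterPromptSuspicionRequest(
    apikey="moderator-api-key",   # assumed field name
    prompt="a cat sitting on a windowsill",
    filter_type=10,
)

# FilterPromptSuspicionResponse (above) reports `suspicion` -- a value over 2
# would be blocked -- and the matched prompt sections in `matches`.
```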
+ + Represents a PUT request to the /v2/filters endpoint. + + v2 API Model: `PutNewFilter` + """ + + description: str = Field(default="") + """The description of the filter.""" + filter_type: int = Field(examples=[10], ge=10, le=29) + """The type of filter to add.""" + regex: str = Field(examples=["ac.*"]) + """The regex for this filter.""" + replacement: str | None = Field(default="") + """The replacement string for this regex.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "PutNewFilter" + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_filters + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.PUT + + @override + @classmethod + def get_default_success_response_type(cls) -> type[FilterDetails]: + return FilterDetails + + @override + @classmethod + def get_success_status_response_pairs(cls) -> dict[HTTPStatusCode, type[HordeResponseTypes]]: + return { + HTTPStatusCode.CREATED: FilterDetails, + } + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class FilterRegex(HordeAPIObjectBaseModel): + """Details about a filter, including the regex and its type. + + v2 API Model: `FilterRegex` + """ + + filter_type: int = Field(examples=[10], ge=10, le=29) + """The type of filter.""" + regex: str = Field(examples=["ac.*"]) + """The regex for this filter.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "FilterRegex" + + +@Unhashable +@Unequatable +class FilterRegexResponse(HordeResponseRootModel[list[FilterRegex]]): + """A list of filters. + + Represents the data returned from the /v2/filters/regex endpoint with http status code 200. + + v2 API Model: `_ANONYMOUS_MODEL` + """ + + root: list[FilterRegex] + """The underlying list of filters.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return _ANONYMOUS_MODEL + + +class FilterRegexRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request to get a list of regex filters. + + Represents a GET request to the /v2/filters/regex endpoint. + """ + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_filters_regex + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_default_success_response_type(cls) -> type[FilterRegexResponse]: + return FilterRegexResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class SingleFilterRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request details about a single filter. + + Represents a GET request to the /v2/filters/{filter_id} endpoint. 
+ """ + + filter_id: str + """The ID of the filter to retrieve.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_filters_regex_single + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_default_success_response_type(cls) -> type[FilterDetails]: + return FilterDetails + + @override + @classmethod + def get_query_fields(cls) -> list[str]: + return ["filter_id"] + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class DeleteFilterResponse( + HordeResponseBaseModel, + ContainsMessageResponseMixin, +): + """Response to deleting a filter, with a message. + + Represents the data returned from the /v2/filters/{filter_id} endpoint with http status code 200. + + v2 API Model: `_ANONYMOUS_MODEL` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str: + return _ANONYMOUS_MODEL + + +class DeleteFilterRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request to delete a filter by its filter_id. + + Represents a DELETE request to the /v2/filters/{filter_id} endpoint. + """ + + filter_id: str + """The ID of the filter to delete.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_filters_regex_single + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.DELETE + + @override + @classmethod + def get_default_success_response_type(cls) -> type[DeleteFilterResponse]: + return DeleteFilterResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class PatchExistingFilter( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Update an existing filter by its filter_id. + + Represents a PATCH request to the /v2/filters/{filter_id} endpoint. 
+ + v2 API Model: `PatchExistingFilter` + """ + + description: str | None = Field(default=None) + """The description of the filter.""" + regex: str | None = Field(default=None) + """The regex for this filter.""" + replacement: str | None = Field(default=None) + """The replacement string for this regex.""" + filter_type: int | None = Field(examples=[10], ge=10, le=29) + """The type of filter to add.""" + + filter_id: str + """The ID of the filter to update.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "PatchExistingFilter" + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_filters_regex_single + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.PATCH + + @override + @classmethod + def get_default_success_response_type(cls) -> type[FilterDetails]: + return FilterDetails + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True diff --git a/horde_sdk/ai_horde_api/apimodels/_find_user.py b/horde_sdk/ai_horde_api/apimodels/find_user.py similarity index 72% rename from horde_sdk/ai_horde_api/apimodels/_find_user.py rename to horde_sdk/ai_horde_api/apimodels/find_user.py index 6b3f2d76..c2a527b3 100644 --- a/horde_sdk/ai_horde_api/apimodels/_find_user.py +++ b/horde_sdk/ai_horde_api/apimodels/find_user.py @@ -1,13 +1,20 @@ -from typing_extensions import override +from typing import override -from horde_sdk.ai_horde_api.apimodels._users import UserDetailsResponse from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest +from horde_sdk.ai_horde_api.apimodels.users import UserDetailsResponse from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH from horde_sdk.consts import HTTPMethod from horde_sdk.generic_api.apimodels import APIKeyAllowedInRequestMixin class FindUserRequest(BaseAIHordeRequest, APIKeyAllowedInRequestMixin): + """Request to find a user by their API key. + + This is useful for frontend applications first confirming that a user is registered with the Horde. + + Represents a GET request to the /v2/find_user endpoint. 
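`FindUserRequest` above lets a frontend resolve which user a given API key belongs to before doing anything else with it. A hedged sketch; `apikey` is the assumed field name supplied by `APIKeyAllowedInRequestMixin`:

```python
from horde_sdk.ai_horde_api.apimodels.find_user import FindUserRequest

# GET /v2/find_user -- confirm the key is registered and fetch its user record.
request = FindUserRequest(apikey="user-api-key")  # assumed field name

# Per the imports above, the default success response type is UserDetailsResponse.
```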
+ """ + @override @classmethod def get_api_model_name(cls) -> str | None: diff --git a/horde_sdk/ai_horde_api/apimodels/generate/_async.py b/horde_sdk/ai_horde_api/apimodels/generate/async_.py similarity index 80% rename from horde_sdk/ai_horde_api/apimodels/generate/_async.py rename to horde_sdk/ai_horde_api/apimodels/generate/async_.py index 63d2617c..bcabf05e 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/_async.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/async_.py @@ -1,8 +1,9 @@ from __future__ import annotations +from typing import override + from loguru import logger from pydantic import AliasChoices, Field, field_validator, model_validator -from typing_extensions import override from horde_sdk.ai_horde_api.apimodels.base import ( BaseAIHordeRequest, @@ -11,11 +12,11 @@ JobResponseMixin, SingleWarningEntry, ) -from horde_sdk.ai_horde_api.apimodels.generate._check import ImageGenerateCheckRequest -from horde_sdk.ai_horde_api.apimodels.generate._status import DeleteImageGenerateRequest, ImageGenerateStatusRequest -from horde_sdk.ai_horde_api.consts import KNOWN_SOURCE_PROCESSING +from horde_sdk.ai_horde_api.apimodels.generate.check import ImageGenerateCheckRequest +from horde_sdk.ai_horde_api.apimodels.generate.status import DeleteImageGenerateRequest, ImageGenerateStatusRequest from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod, HTTPStatusCode +from horde_sdk.generation_parameters.image.consts import KNOWN_IMAGE_SOURCE_PROCESSING from horde_sdk.generic_api.apimodels import ( APIKeyAllowedInRequestMixin, ContainsMessageResponseMixin, @@ -24,6 +25,7 @@ RequestUsesWorkerMixin, ResponseRequiringFollowUpMixin, ) +from horde_sdk.generic_api.decoration import Unequatable, Unhashable class ImageGenerateAsyncResponse( @@ -32,7 +34,13 @@ class ImageGenerateAsyncResponse( ResponseRequiringFollowUpMixin, ContainsMessageResponseMixin, ): - """Represents the data returned from the `/v2/generate/async` endpoint. + """The expected cost of the requested job and any warnings generated by the server. + + A typical warning is that the request is not currently possible. You may want to handle certain + warnings in your application. See :class:`horde_sdk.ai_horde_api.apimodels.base.SingleWarningEntry` for more + information. + + Represents the data returned from the /v2/generate/async endpoint with http status code 202. v2 API Model: `RequestAsync` """ @@ -45,6 +53,7 @@ class ImageGenerateAsyncResponse( @model_validator(mode="after") def validate_warnings(self) -> ImageGenerateAsyncResponse: + """Log any warnings that were generated by the server or a serving worker.""" if self.warnings is None: return self @@ -59,6 +68,7 @@ def get_follow_up_returned_params(self, *, as_python_field_name: bool = False) - return [{"id_": self.id_}] return [{"id": self.id_}] + @override @classmethod def get_follow_up_default_request_type(cls) -> type[ImageGenerateCheckRequest]: return ImageGenerateCheckRequest @@ -88,6 +98,13 @@ def __eq__(self, __value: object) -> bool: class ImageGenerateAsyncDryRunResponse(HordeResponseBaseModel): + """Request the expected cost of the job without actually starting the job. + + Represents the data returned from the /v2/generate/async endpoint with http status code 200. 
+ + v2 API Model: `_ANONYMOUS_MODEL` + """ + kudos: float """The expected kudos consumption for this request.""" @@ -116,6 +133,7 @@ def get_api_model_name(cls) -> str | None: @field_validator("n", mode="before") def validate_n(cls, value: int) -> int: + """Ensure that n is at least 1.""" if value == 0: logger.debug("n (number of images to generate) is not set; defaulting to 1") return 1 @@ -123,12 +141,20 @@ def validate_n(cls, value: int) -> int: return value +@Unhashable +@Unequatable class ImageGenerateAsyncRequest( BaseAIHordeRequest, APIKeyAllowedInRequestMixin, RequestUsesWorkerMixin, ): - """Represents the data needed to make a request to the `/v2/generate/async` endpoint. + """Request to start an image generation job. + + Upon a successful response, you should poll the status of the job using the + check endpoint. See :class:`horde_sdk.ai_horde_api.apimodels.generate.check.ImageGenerateCheckRequest` for more + information. + + Represents a POST request to the /v2/generate/async endpoint. v2 API Model: `GenerationInputStable` """ @@ -156,7 +182,7 @@ class ImageGenerateAsyncRequest( source_image: str | None = None """The public URL of the source image or a base64 string to use.""" - source_processing: KNOWN_SOURCE_PROCESSING = KNOWN_SOURCE_PROCESSING.txt2img + source_processing: KNOWN_IMAGE_SOURCE_PROCESSING = KNOWN_IMAGE_SOURCE_PROCESSING.txt2img """If source_image is provided, specifies how to process it.""" source_mask: str | None = None """If source_processing is set to 'inpainting' or 'outpainting', this parameter can be optionally provided as the @@ -167,6 +193,7 @@ class ImageGenerateAsyncRequest( @model_validator(mode="after") def validate_censor_nsfw(self) -> ImageGenerateAsyncRequest: + """Ensure that censor_nsfw is not set when nsfw is enabled.""" if self.nsfw and self.censor_nsfw: raise ValueError("Cannot censor NSFW content when NSFW detection is enabled.") return self diff --git a/horde_sdk/ai_horde_api/apimodels/generate/_check.py b/horde_sdk/ai_horde_api/apimodels/generate/check.py similarity index 74% rename from horde_sdk/ai_horde_api/apimodels/generate/_check.py rename to horde_sdk/ai_horde_api/apimodels/generate/check.py index 71881d4d..d6d21c5e 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/_check.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/check.py @@ -1,9 +1,10 @@ +from typing import override + from loguru import logger -from typing_extensions import override from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest, JobRequestMixin -from horde_sdk.ai_horde_api.apimodels.generate._progress import ResponseGenerationProgressInfoMixin -from horde_sdk.ai_horde_api.apimodels.generate._status import ImageGenerateStatusRequest +from horde_sdk.ai_horde_api.apimodels.generate.progress import ResponseGenerationProgressInfoMixin +from horde_sdk.ai_horde_api.apimodels.generate.status import ImageGenerateStatusRequest from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH from horde_sdk.consts import HTTPMethod from horde_sdk.generic_api.apimodels import HordeResponseBaseModel, ResponseWithProgressMixin @@ -14,7 +15,10 @@ class ImageGenerateCheckResponse( ResponseWithProgressMixin, ResponseGenerationProgressInfoMixin, ): - """Represents the data returned from the `/v2/generate/check/{id}` endpoint. + """The progress of an image request. This does not return any image data. 
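The docstrings above spell out the image generation workflow: POST to /v2/generate/async, poll cheaply with the check endpoint, and only fetch the full status (with image data) once the job is done. A hedged sketch of the request types involved, using only names visible in this changeset; the job ID is a placeholder and dispatching the requests is left to the client layer:

```python
from horde_sdk.ai_horde_api.apimodels.generate.check import ImageGenerateCheckRequest
from horde_sdk.ai_horde_api.apimodels.generate.status import ImageGenerateStatusRequest

# 1. POST /v2/generate/async (ImageGenerateAsyncRequest) returns an
#    ImageGenerateAsyncResponse whose id_ identifies the queued job.
job_id = "00000000-0000-0000-0000-000000000000"  # placeholder for response.id_

# 2. Poll with the check endpoint until the job reports as done.
check_request = ImageGenerateCheckRequest(id=job_id)

# 3. Only then fetch the larger status payload that carries the generations.
status_request = ImageGenerateStatusRequest(id=job_id)
```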
+ + Represents the data returned from the following endpoints and http status codes: + - /v2/generate/check/{id} | ImageGenerateCheckRequest [GET] -> 200 v2 API Model: `RequestStatusCheck` """ @@ -44,7 +48,12 @@ def get_finalize_success_request_type(cls) -> type[ImageGenerateStatusRequest]: class ImageGenerateCheckRequest(BaseAIHordeRequest, JobRequestMixin): - """Represents a GET request to the `/v2/generate/check/{id}` endpoint.""" + """Request the progress of an image request. + + This is the preferred way to check the progress of a generate request. + + Represents a GET request to the /v2/generate/check/{id} endpoint. + """ @override @classmethod diff --git a/horde_sdk/ai_horde_api/apimodels/generate/_pop.py b/horde_sdk/ai_horde_api/apimodels/generate/pop.py similarity index 85% rename from horde_sdk/ai_horde_api/apimodels/generate/_pop.py rename to horde_sdk/ai_horde_api/apimodels/generate/pop.py index f85b9e8f..143667b8 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/_pop.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/pop.py @@ -2,28 +2,28 @@ import asyncio import uuid +from typing import override from urllib.parse import urlparse import aiohttp from loguru import logger from pydantic import AliasChoices, Field, field_validator, model_validator -from typing_extensions import override from horde_sdk.ai_horde_api.apimodels.base import ( BaseAIHordeRequest, ExtraSourceImageEntry, ImageGenerateParamMixin, ) -from horde_sdk.ai_horde_api.apimodels.generate._submit import ImageGenerationJobSubmitRequest +from horde_sdk.ai_horde_api.apimodels.generate.submit import ImageGenerationJobSubmitRequest +from horde_sdk.ai_horde_api.apimodels.workers.messages import _ResponseModelMessageData from horde_sdk.ai_horde_api.consts import ( GENERATION_STATE, - KNOWN_FACEFIXERS, - KNOWN_SOURCE_PROCESSING, - KNOWN_UPSCALERS, ) from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH -from horde_sdk.ai_horde_api.fields import JobID -from horde_sdk.consts import HTTPMethod +from horde_sdk.ai_horde_api.fields import GenerationID +from horde_sdk.consts import _OVERLOADED_MODEL, HTTPMethod +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_FACEFIXERS, KNOWN_UPSCALERS +from horde_sdk.generation_parameters.image.consts import KNOWN_IMAGE_SOURCE_PROCESSING from horde_sdk.generic_api.apimodels import ( APIKeyAllowedInRequestMixin, HordeAPIObjectBaseModel, @@ -31,9 +31,15 @@ ResponseRequiringDownloadMixin, ResponseRequiringFollowUpMixin, ) +from horde_sdk.generic_api.decoration import Unequatable, Unhashable class NoValidRequestFound(HordeAPIObjectBaseModel): + """Base class for the number of jobs a worker skipped for, and why. + + v2 API Model: `NoValidRequestFound` + """ + blacklist: int | None = Field(default=None, ge=0) """How many waiting requests were skipped because they demanded a generation with a word that this worker does not accept.""" @@ -65,9 +71,14 @@ def is_empty(self) -> bool: """Whether or not this object has any non-zero values.""" return len(self.model_fields_set) == 0 + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "NoValidRequestFound" + class ImageGenerateJobPopSkippedStatus(NoValidRequestFound): - """Represents the data returned from the `/v2/generate/pop` endpoint for why a worker was skipped. + """The number of jobs a worker was skipped for, and why. 
v2 API Model: `NoValidRequestFoundStable` """ @@ -96,6 +107,11 @@ def get_api_model_name(cls) -> str | None: class ImageGenerateJobPopPayload(ImageGenerateParamMixin): + """Mixin for the additional image generation parameters used in dispatching a job to a worker. + + v2 API Model: `ModelPayloadRootStable` + """ + prompt: str | None = None """The prompt to use for this image generation.""" @@ -107,6 +123,8 @@ class ImageGenerateJobPopPayload(ImageGenerateParamMixin): class ExtraSourceImageMixin(ResponseRequiringDownloadMixin): + """Mixin for jobs which have extra source images.""" + extra_source_images: list[ExtraSourceImageEntry] | None = None """Additional uploaded images (as base64) which can be used for further operations.""" _downloaded_extra_source_images: list[ExtraSourceImageEntry] | None = None @@ -137,8 +155,16 @@ async def async_download_extra_source_images( return self._downloaded_extra_source_images self._downloaded_extra_source_images = [] + + download_tasks = [] for extra_source_image in self.extra_source_images: - await self._download_image_if_needed(client_session, extra_source_image, max_retries) + download_tasks.append( + asyncio.create_task( + self._download_image_if_needed(client_session, extra_source_image, max_retries), + ), + ) + + await asyncio.gather(*download_tasks) self._sort_downloaded_images() return self._downloaded_extra_source_images.copy() @@ -199,24 +225,46 @@ def _sort_downloaded_images(self) -> None: ) +class PopResponseModelMessage(_ResponseModelMessageData): + """The message data which appears in a job pop response. + + v2 API Model: `_MODEL_OVERLOADED` + """ + + id_: str | None = Field(default=None, alias="id") + """The ID of the message.""" + + expiry: str | None = None + """The time at which this message expires.""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return _OVERLOADED_MODEL + + class ImageGenerateJobPopResponse( HordeResponseBaseModel, ResponseRequiringFollowUpMixin, ExtraSourceImageMixin, ): - """Represents the data returned from the `/v2/generate/pop` endpoint. + """Contains job data for workers, if any were available. Also contains data for jobs this worker was skipped for. + + This is the key response type for all image workers as it contains all assignment data for the worker. + + Represents the data returned from the /v2/generate/pop endpoint with http status code 200. v2 API Model: `GenerationPayloadStable` """ - id_: JobID | None = Field(default=None, alias="id") + id_: GenerationID | None = Field(default=None, alias="id") """(Obsolete) The UUID for this image generation.""" - ids: list[JobID] + ids: list[GenerationID] """A list of UUIDs for image generation.""" payload: ImageGenerateJobPopPayload """The parameters used to generate this image.""" - skipped: ImageGenerateJobPopSkippedStatus + skipped: ImageGenerateJobPopSkippedStatus = Field(default_factory=ImageGenerateJobPopSkippedStatus) """The reasons this worker was not issued certain jobs, and the number of jobs for each reason.""" model: str | None = None """Which of the available models to use for this request.""" @@ -224,7 +272,7 @@ class ImageGenerateJobPopResponse( """The URL or Base64-encoded webp to use for img2img.""" _downloaded_source_image: str | None = None """The downloaded source image (as base64), if any. 
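
# --- Illustrative sketch (not taken from this changeset) --------------------
# JobID is replaced by GenerationID throughout these files.  As the validators
# above show, it is a root model wrapping a UUID, so it can be constructed
# directly from uuid.uuid4(); the import path matches the diff's own imports.
import uuid

from horde_sdk.ai_horde_api.fields import GenerationID

generation_id = GenerationID(root=uuid.uuid4())
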
This is not part of the API response.""" - source_processing: str | KNOWN_SOURCE_PROCESSING = KNOWN_SOURCE_PROCESSING.txt2img + source_processing: str | KNOWN_IMAGE_SOURCE_PROCESSING = KNOWN_IMAGE_SOURCE_PROCESSING.txt2img """If source_image is provided, specifies how to process it.""" source_mask: str | None = None """If img_processing is set to 'inpainting' or 'outpainting', this parameter can be optionally provided as the @@ -239,23 +287,30 @@ class ImageGenerateJobPopResponse( ttl: int | None = None """The amount of seconds before this job is considered stale and aborted.""" + messages: list[PopResponseModelMessage] | None = None + """The messages that have been sent to this worker.""" + @field_validator("source_processing") - def source_processing_must_be_known(cls, v: str | KNOWN_SOURCE_PROCESSING) -> str | KNOWN_SOURCE_PROCESSING: + def source_processing_must_be_known( + cls, + v: str | KNOWN_IMAGE_SOURCE_PROCESSING, + ) -> str | KNOWN_IMAGE_SOURCE_PROCESSING: """Ensure that the source processing is in this list of supported source processing.""" - if isinstance(v, KNOWN_SOURCE_PROCESSING): + if isinstance(v, KNOWN_IMAGE_SOURCE_PROCESSING): return v try: - KNOWN_SOURCE_PROCESSING(v) + KNOWN_IMAGE_SOURCE_PROCESSING(v) except ValueError: logger.warning(f"Unknown source processing {v}. Is your SDK out of date or did the API change?") return v @field_validator("id_", mode="before") - def validate_id(cls, v: str | JobID) -> JobID | str: + def validate_id(cls, v: str | GenerationID) -> GenerationID | str: + """Validate the ID is not an empty string.""" if isinstance(v, str) and v == "": logger.warning("Job ID is empty") - return JobID(root=uuid.uuid4()) + return GenerationID(root=uuid.uuid4()) return v @@ -266,14 +321,6 @@ def ids_present(self) -> bool: """Whether or not the IDs are present.""" return self._ids_present - def _sort_ids(self) -> None: - """Sort the IDs in place and sort so r2_uploads is changed so the same index changes occur.""" - if len(self.ids) > 1: - logger.debug("Sorting IDs") - self.ids.sort() - if self.r2_uploads is not None: - self.r2_uploads.sort() - @model_validator(mode="after") def validate_ids_present(self) -> ImageGenerateJobPopResponse: """Ensure that either id_ or ids is present.""" @@ -287,8 +334,6 @@ def validate_ids_present(self) -> ImageGenerateJobPopResponse: if self.id_ is None and len(self.ids) == 0: raise ValueError("Neither id_ nor ids were present in the response.") - self._sort_ids() - self._ids_present = True return self @@ -340,7 +385,7 @@ def has_upscaler(self) -> bool: return False return any( - post_processing in KNOWN_UPSCALERS.__members__ or post_processing in KNOWN_UPSCALERS._value2member_map_ + post_processing in KNOWN_UPSCALERS.__members__ or post_processing in KNOWN_UPSCALERS.__members__.values() for post_processing in self.payload.post_processing ) @@ -424,13 +469,18 @@ def __hash__(self) -> int: return hash(ImageGenerateJobPopResponse.__name__) + hash(self.id_) if len(self.ids) > 0: - return hash(ImageGenerateJobPopResponse.__name__) + hash(tuple(self.ids)) + return hash(ImageGenerateJobPopResponse.__name__) + hash(tuple(sorted(self.ids))) logger.warning("No ID or IDs found in response. This is a bug.") return hash(0) class PopInput(HordeAPIObjectBaseModel): + """The input data for a image worker requesting jobs. 
+ + v2 API Model: `PopInput` + """ + amount: int | None = Field(1, ge=1, le=20) """The number of jobs to pop at the same time.""" bridge_agent: str | None = Field( @@ -478,8 +528,14 @@ def get_api_model_name(cls) -> str | None: return "PopInput" +@Unhashable +@Unequatable class ImageGenerateJobPopRequest(BaseAIHordeRequest, APIKeyAllowedInRequestMixin, PopInput): - """Represents the data needed to make a job request from a worker to the /v2/generate/pop endpoint. + """Request additional jobs, if any are available, for an image worker. + + This is the key request type for all image workers as it contains all the parameters needed to request a job. + + Represents a POST request to the /v2/generate/pop endpoint. v2 API Model: `PopInputStable` """ diff --git a/horde_sdk/ai_horde_api/apimodels/generate/_progress.py b/horde_sdk/ai_horde_api/apimodels/generate/progress.py similarity index 86% rename from horde_sdk/ai_horde_api/apimodels/generate/_progress.py rename to horde_sdk/ai_horde_api/apimodels/generate/progress.py index edbf7d2e..e8d31d71 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/_progress.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/progress.py @@ -2,6 +2,8 @@ class ResponseGenerationProgressInfoMixin(HordeAPIObjectBaseModel): + """A mixin for responses that contain information about the progress of a generation request.""" + finished: int """The amount of finished jobs in this request.""" processing: int @@ -25,4 +27,4 @@ class ResponseGenerationProgressInfoMixin(HordeAPIObjectBaseModel): class ResponseGenerationProgressCombinedMixin(ResponseWithProgressMixin, ResponseGenerationProgressInfoMixin): - pass + """A mixin for responses which provide progress and detailed information about a generation.""" diff --git a/horde_sdk/ai_horde_api/apimodels/generate/rate.py b/horde_sdk/ai_horde_api/apimodels/generate/rate.py new file mode 100644 index 00000000..b39c625e --- /dev/null +++ b/horde_sdk/ai_horde_api/apimodels/generate/rate.py @@ -0,0 +1,109 @@ +from typing import override + +from pydantic import Field + +from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest +from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH +from horde_sdk.consts import HTTPMethod +from horde_sdk.generic_api.apimodels import ( + HordeAPIObjectBaseModel, + HordeResponseBaseModel, +) +from horde_sdk.generic_api.decoration import Unequatable, Unhashable + + +class AestheticRating(HordeAPIObjectBaseModel): + """Represents an aesthetic rating for an image. + + v2 API Model: `AestheticRating` + """ + + id_: str = Field( + alias="id", + description="The UUID of image being rated.", + ) + """The UUID of image being rated.""" + + rating: int + """The aesthetic rating 1-10 for this image.""" + + artifacts: int + """The artifacts rating for this image. + 0 for flawless generation that perfectly fits to the prompt. + 1 for small, hardly recognizable flaws. 2 small flaws that can easily be spotted, but don not harm the aesthetic + experience. + 3 for flaws that look obviously wrong, but only mildly harm the aesthetic experience. + 4 for flaws that look obviously wrong & significantly harm the aesthetic experience. + 5 for flaws that make the image look like total garbage.""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "AestheticRating" + + +class AestheticsPayload(HordeAPIObjectBaseModel): + """Represents the payload for rating images. 
+ + v2 API Model: `AestheticsPayload` + """ + + best: str | None = None + """The UUID of the best image in this generation batch (only used when 2+ images generated). + If 2+ aesthetic ratings are also provided, then they take precedence if they're not tied.""" + + ratings: list[AestheticRating] + """The aesthetic ratings for each image in the batch.""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "AestheticsPayload" + + +class RateResponse(HordeResponseBaseModel): + """The response to a rating submission, including the reward amount. + + Represents the data returned from the /v2/generate/rate/{id} endpoint with http status code 200. + + v2 API Model: `GenerationSubmitted` + """ + + reward: float + """The reward for the rating.""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "GenerationSubmitted" + + +@Unhashable +@Unequatable +class RateRequest(AestheticsPayload, BaseAIHordeRequest): + """Submit ratings for a batch of images. + + Represents a POST request to the /v2/generate/rate/{id} endpoint. + + v2 API Model: `AestheticsPayload` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "AestheticsPayload" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.POST + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_generate_rate_id + + @override + @classmethod + def get_default_success_response_type(cls) -> type[RateResponse]: + return RateResponse diff --git a/horde_sdk/ai_horde_api/apimodels/generate/_status.py b/horde_sdk/ai_horde_api/apimodels/generate/status.py similarity index 73% rename from horde_sdk/ai_horde_api/apimodels/generate/_status.py rename to horde_sdk/ai_horde_api/apimodels/generate/status.py index 70a11743..963e900c 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/_status.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/status.py @@ -1,19 +1,24 @@ import uuid +from typing import override from loguru import logger from pydantic import Field, field_validator -from typing_extensions import override from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest, GenMetadataEntry, JobRequestMixin -from horde_sdk.ai_horde_api.apimodels.generate._progress import ResponseGenerationProgressInfoMixin +from horde_sdk.ai_horde_api.apimodels.generate.progress import ResponseGenerationProgressInfoMixin from horde_sdk.ai_horde_api.consts import GENERATION_STATE from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH -from horde_sdk.ai_horde_api.fields import JobID, WorkerID +from horde_sdk.ai_horde_api.fields import GenerationID, WorkerID from horde_sdk.consts import HTTPMethod from horde_sdk.generic_api.apimodels import HordeAPIObjectBaseModel, HordeResponseBaseModel, ResponseWithProgressMixin class Generation(HordeAPIObjectBaseModel): + """Mixin that contains common fields for generation responses. + + v2 API Model: `Generation` + """ + model: str = Field(title="Generation Model") """The model which generated this image.""" state: GENERATION_STATE = Field( @@ -31,15 +36,22 @@ class Generation(HordeAPIObjectBaseModel): ) """The name of the worker which generated this image.""" + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "Generation" + class ImageGeneration(Generation): - """Represents the individual image generation responses in a ImageGenerateStatusResponse. 
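
# --- Illustrative sketch (not taken from this changeset) --------------------
# Building the rating payload for /v2/generate/rate/{id} with the new models
# in rate.py above.  The artifacts value follows the 0-5 scale documented on
# `AestheticRating.artifacts`; the UUIDs below are placeholders.
from horde_sdk.ai_horde_api.apimodels.generate.rate import AestheticRating, AestheticsPayload

rating_payload = AestheticsPayload(
    best="11111111-1111-1111-1111-111111111111",  # optional: best image of the batch
    ratings=[
        AestheticRating(id="11111111-1111-1111-1111-111111111111", rating=8, artifacts=1),
        AestheticRating(id="22222222-2222-2222-2222-222222222222", rating=5, artifacts=3),
    ],
)
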
+ """Represents an individual image generation in a status response, including the image data. + + Represents the individual image generation responses in a ImageGenerateStatusResponse. v2 API Model: `GenerationStable` """ - id_: JobID = Field(alias="id") - """The UUID of this generation. Is always returned as a `JobID`, but can initialized from a `str`.""" + id_: GenerationID = Field(alias="id") + """The UUID of this generation. Is always returned as a `GenerationID`, but can initialized from a `str`.""" # todo: remove `str`? img: str """The generated image as a Base64-encoded .webp file.""" @@ -56,10 +68,11 @@ def get_api_model_name(self) -> str | None: return "GenerationStable" @field_validator("id_", mode="before") - def validate_id(cls, v: str | JobID) -> JobID | str: + def validate_id(cls, v: str | GenerationID) -> GenerationID | str: + """Validate the ID is not an empty string.""" if isinstance(v, str) and v == "": logger.warning("Job ID is empty") - return JobID(root=uuid.uuid4()) + return GenerationID(root=uuid.uuid4()) return v @@ -77,7 +90,11 @@ class ImageGenerateStatusResponse( ResponseWithProgressMixin, ResponseGenerationProgressInfoMixin, ): - """Represent the response from the AI-Horde API when checking the status of an image generation job. + """The current status of an image generation request and the data if it is complete. + + Represents the data returned from the following endpoints and http status codes: + - /v2/generate/status/{id} | ImageGenerateStatusRequest [GET] -> 200 + - /v2/generate/status/{id} | DeleteImageGenerateRequest [DELETE] -> 200 v2 API Model: `RequestStatusStable` """ @@ -123,7 +140,10 @@ class DeleteImageGenerateRequest( BaseAIHordeRequest, JobRequestMixin, ): - """Represents a DELETE request to the `/v2/generate/status/{id}` endpoint.""" + """Request to cancel an image generation by ID. + + Represents a DELETE request to the /v2/generate/status/{id} endpoint. + """ @override @classmethod @@ -158,7 +178,15 @@ def __hash__(self) -> int: class ImageGenerateStatusRequest(BaseAIHordeRequest, JobRequestMixin): - """Represents a GET request to the `/v2/generate/status/{id}` endpoint.""" + """Request the status of an image generation by ID. + + Important: This is a rate limited endpoint. Prefer using the check endpoint when simply checking the status. + Use this endpoint when you need to get the resulting image data. + See :class:`horde_sdk.api_horde_api.apimodels.generate.check.ImageGenerateCheckRequest` + for more information. + + Represents a GET request to the /v2/generate/status/{id} endpoint. 
+ """ @override @classmethod diff --git a/horde_sdk/ai_horde_api/apimodels/generate/_submit.py b/horde_sdk/ai_horde_api/apimodels/generate/submit.py similarity index 82% rename from horde_sdk/ai_horde_api/apimodels/generate/_submit.py rename to horde_sdk/ai_horde_api/apimodels/generate/submit.py index 8ace859c..0f91a6c7 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/_submit.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/submit.py @@ -1,8 +1,9 @@ from __future__ import annotations +from typing import Literal, override + from loguru import logger from pydantic import model_validator -from typing_extensions import override from horde_sdk.ai_horde_api.apimodels.base import ( BaseAIHordeRequest, @@ -14,20 +15,25 @@ from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH from horde_sdk.consts import HTTPMethod from horde_sdk.generic_api.apimodels import APIKeyAllowedInRequestMixin +from horde_sdk.generic_api.decoration import Unequatable, Unhashable +@Unhashable +@Unequatable class ImageGenerationJobSubmitRequest( BaseAIHordeRequest, JobRequestMixin, APIKeyAllowedInRequestMixin, ): - """Represents the data needed to make a job submit 'request' from a worker to the /v2/generate/submit endpoint. + """Used when a worker submits a generation job. Includes metadata about the generation. + + Represents a POST request to the /v2/generate/submit endpoint. v2 API Model: `SubmitInputStable` """ - generation: str = "" - """R2 result was uploaded to R2, else the string of the result.""" + generation: str | Literal["R2"] = "" + """R2 result was uploaded to R2, else the string of the result as base64.""" state: GENERATION_STATE """The state of this generation.""" seed: int = 0 @@ -39,6 +45,7 @@ class ImageGenerationJobSubmitRequest( @model_validator(mode="after") def validate_generation(self) -> ImageGenerationJobSubmitRequest: + """Validate the generation field is not an empty string and warn if the seed is 0.""" if self.generation == "": logger.error("Generation cannot be an empty string.") logger.error(self.log_safe_model_dump()) diff --git a/horde_sdk/ai_horde_api/apimodels/generate/text/_async.py b/horde_sdk/ai_horde_api/apimodels/generate/text/async_.py similarity index 90% rename from horde_sdk/ai_horde_api/apimodels/generate/text/_async.py rename to horde_sdk/ai_horde_api/apimodels/generate/text/async_.py index 781dabb9..215933e3 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/text/_async.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/text/async_.py @@ -1,8 +1,9 @@ from __future__ import annotations +from typing import override + from loguru import logger from pydantic import Field, model_validator -from typing_extensions import override from horde_sdk.ai_horde_api.apimodels.base import ( BaseAIHordeRequest, @@ -10,7 +11,7 @@ JobResponseMixin, SingleWarningEntry, ) -from horde_sdk.ai_horde_api.apimodels.generate.text._status import DeleteTextGenerateRequest, TextGenerateStatusRequest +from horde_sdk.ai_horde_api.apimodels.generate.text.status import DeleteTextGenerateRequest, TextGenerateStatusRequest from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod, HTTPStatusCode from horde_sdk.generic_api.apimodels import ( @@ -31,6 +32,13 @@ class TextGenerateAsyncResponse( ResponseRequiringFollowUpMixin, ContainsMessageResponseMixin, ): + """The expected cost of the requested job and any warnings generated by the server. 
+ + Represents the data returned from the /v2/generate/text/async endpoint with http status code 202. + + v2 API Model: `RequestAsync` + """ + kudos: float | None = Field( default=None, ) @@ -40,6 +48,7 @@ class TextGenerateAsyncResponse( @model_validator(mode="after") def validate_warnings(self) -> TextGenerateAsyncResponse: + """Log any warnings that were generated by the server or a serving worker.""" if self.warnings is None: return self @@ -54,6 +63,7 @@ def get_follow_up_returned_params(self, *, as_python_field_name: bool = False) - return [{"id_": self.id_}] return [{"id": self.id_}] + @override @classmethod def get_follow_up_default_request_type(cls) -> type[TextGenerateStatusRequest]: return TextGenerateStatusRequest @@ -82,12 +92,12 @@ def __eq__(self, __value: object) -> bool: return isinstance(__value, TextGenerateAsyncResponse) and self.id_ == __value.id_ -@Unhashable -class ModelPayloadRootKobold(HordeAPIData): +class _BasePayloadKoboldMixin(HordeAPIData): dynatemp_exponent: float | None = Field(1, ge=0.0, le=5.0) """Dynamic temperature exponent value.""" dynatemp_range: float | None = Field(0, ge=0.0, le=5.0) """Dynamic temperature range value.""" + frmtadsnsp: bool | None = Field( default=None, description=( @@ -132,24 +142,16 @@ class ModelPayloadRootKobold(HordeAPIData): ) """Output formatting option. When enabled, removes some characters from the end of the output such that the output doesn't end in the middle of a sentence. If the output is less than one sentence long, does nothing.""" - max_context_length: int | None = Field( - default=1024, - ge=80, - le=32000, - ) - """Maximum number of tokens to send to the model.""" - max_length: int | None = Field(80, ge=16, le=1024) - """Number of tokens to generate.""" + min_p: float | None = Field(0, ge=0.0, le=1.0) """Min-p sampling value.""" - n: int | None = Field(default=None, examples=[1], ge=1, le=20) - """The number of generations to produce.""" rep_pen: float | None = Field(default=None, ge=1.0, le=3.0) """Base repetition penalty value.""" rep_pen_range: int | None = Field(default=None, ge=0, le=4096) """Repetition penalty range.""" rep_pen_slope: float | None = Field(default=None, ge=0.0, le=10.0) """Repetition penalty slope.""" + sampler_order: list[int] | None = None """The sampler order to use for the generation.""" singleline: bool | None = Field( @@ -184,12 +186,35 @@ class ModelPayloadRootKobold(HordeAPIData): """When True, uses the default KoboldAI bad word IDs.""" +@Unhashable +class ModelPayloadRootKobold(_BasePayloadKoboldMixin): + """Base model for KoboldAI generation payloads.""" + + n: int | None = Field(default=None, examples=[1], ge=1, le=20) + """The number of generations to produce.""" + + max_length: int | None = Field(80, ge=16, le=1024) + """Number of tokens to generate.""" + + max_context_length: int | None = Field( + default=1024, + ge=80, + le=32000, + ) + """Maximum number of tokens to send to the model.""" + + @Unhashable class ModelGenerationInputKobold(ModelPayloadRootKobold): - pass + """API Parity intermediate model for generation input.""" class TextGenerateAsyncDryRunResponse(HordeResponseBaseModel): + """Represents the data returned from the /v2/generate/text/async endpoint with http status code 200. 
+ + v2 API Model: `_ANONYMOUS_MODEL` + """ + kudos: float """The expected kudos consumption for this request.""" @@ -205,7 +230,7 @@ class TextGenerateAsyncRequest( APIKeyAllowedInRequestMixin, RequestUsesWorkerMixin, ): - """Represents the data needed to make a request to the `/v2/generate/async` endpoint. + """Represents a POST request to the /v2/generate/text/async endpoint. v2 API Model: `GenerationInputKobold` """ @@ -215,12 +240,6 @@ class TextGenerateAsyncRequest( prompt: str | None = None """The prompt which will be sent to KoboldAI to generate text.""" - allow_downgrade: bool | None = Field(False) - """When true and the request requires upfront kudos and the account does not have enough The request will be - downgraded in max context and max tokens so that it does not need upfront kudos.""" - disable_batching: bool | None = Field(False) - """When true, This request will not use batching. This will allow you to retrieve accurate seeds. - Feature is restricted to Trusted users and Patreons.""" extra_source_images: list[ExtraSourceImageEntry] | None = None """Any extra source images that should be used for this request; e.g., for multi-modal models.""" proxied_account: str | None = Field(default=None) @@ -231,9 +250,6 @@ class TextGenerateAsyncRequest( min_length=1, ) """Specify which softprompt needs to be used to service this request.""" - webhook: str | None = Field(default=None) - """Provide a URL where the AI Horde will send a POST call after each delivered generation. - The request will include the details of the job as well as the request ID.""" @override @classmethod diff --git a/horde_sdk/ai_horde_api/apimodels/generate/text/_pop.py b/horde_sdk/ai_horde_api/apimodels/generate/text/pop.py similarity index 72% rename from horde_sdk/ai_horde_api/apimodels/generate/text/_pop.py rename to horde_sdk/ai_horde_api/apimodels/generate/text/pop.py index 5143f01c..fdde9c71 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/text/_pop.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/text/pop.py @@ -1,32 +1,45 @@ from __future__ import annotations import uuid +from typing import override import aiohttp from loguru import logger from pydantic import Field, field_validator, model_validator -from typing_extensions import override from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest -from horde_sdk.ai_horde_api.apimodels.generate._pop import ExtraSourceImageMixin, NoValidRequestFound, PopInput -from horde_sdk.ai_horde_api.apimodels.generate.text._async import ModelPayloadRootKobold -from horde_sdk.ai_horde_api.apimodels.generate.text._status import DeleteTextGenerateRequest, TextGenerateStatusRequest +from horde_sdk.ai_horde_api.apimodels.generate.pop import ( + ExtraSourceImageMixin, + NoValidRequestFound, + PopInput, + PopResponseModelMessage, +) +from horde_sdk.ai_horde_api.apimodels.generate.text.async_ import ModelPayloadRootKobold +from horde_sdk.ai_horde_api.apimodels.generate.text.status import DeleteTextGenerateRequest, TextGenerateStatusRequest from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH -from horde_sdk.ai_horde_api.fields import JobID +from horde_sdk.ai_horde_api.fields import GenerationID from horde_sdk.consts import HTTPMethod from horde_sdk.generic_api.apimodels import ( APIKeyAllowedInRequestMixin, HordeResponseBaseModel, ResponseRequiringFollowUpMixin, ) +from horde_sdk.generic_api.decoration import Unequatable, Unhashable class ModelPayloadKobold(ModelPayloadRootKobold): + """Mixin for the model payload for Kobold.""" + 
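
# --- Illustrative sketch (not taken from this changeset) --------------------
# After the split above, the sampler settings live on _BasePayloadKoboldMixin
# while n / max_length / max_context_length sit on ModelPayloadRootKobold.
# All of these fields carry defaults, so a partial payload like this validates.
# The import path follows the rename to generate/text/async_.py in this diff.
from horde_sdk.ai_horde_api.apimodels.generate.text.async_ import ModelPayloadRootKobold

kobold_payload = ModelPayloadRootKobold(
    n=1,                      # number of generations (1-20)
    max_length=120,           # tokens to generate (16-1024)
    max_context_length=2048,  # tokens sent to the model (80-32000)
    rep_pen=1.1,              # base repetition penalty (1.0-3.0)
)
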
prompt: str | None = None """The prompt for the text generation.""" class NoValidRequestFoundKobold(NoValidRequestFound): + """The number of jobs a worker was skipped for, and why. + + v2 API Model: `NoValidRequestFoundKobold` + """ + max_context_length: int | None = Field(default=None) """How many waiting requests were skipped because they demanded a higher max_context_length than what this worker provides.""" @@ -48,11 +61,21 @@ class TextGenerateJobPopResponse( ResponseRequiringFollowUpMixin, ExtraSourceImageMixin, ): + """Request additional jobs, if any are available, for a text generation worker. + + This is the key response type for all text generation workers as it contains all assignment + data for the worker. + + Represents the data returned from the /v2/generate/text/pop endpoint with http status code 200. + + v2 API Model: `GenerationPayloadKobold` + """ + payload: ModelPayloadKobold """The settings for this text generation.""" - id_: JobID | None = Field(default=None, alias="id") + id_: GenerationID | None = Field(default=None, alias="id") """The UUID for this text generation.""" - ids: list[JobID] + ids: list[GenerationID] """The UUIDs for this text generations.""" skipped: NoValidRequestFoundKobold = Field(NoValidRequestFoundKobold()) """The skipped requests that were not valid for this worker.""" @@ -63,11 +86,15 @@ class TextGenerateJobPopResponse( ttl: int | None = None """The amount of seconds before this job is considered stale and aborted.""" + messages: list[PopResponseModelMessage] | None = None + """The messages sent to this worker.""" + @field_validator("id_", mode="before") - def validate_id(cls, v: str | JobID) -> JobID | str: + def validate_id(cls, v: str | GenerationID) -> GenerationID | str: + """Validate the ID is not an empty string.""" if isinstance(v, str) and v == "": logger.warning("Job ID is empty") - return JobID(root=uuid.uuid4()) + return GenerationID(root=uuid.uuid4()) return v @@ -84,10 +111,6 @@ def ids_present(self) -> TextGenerateJobPopResponse: if self.id_ is None and len(self.ids) == 0: raise ValueError("Neither id_ nor ids were present in the response.") - if len(self.ids) > 1: - logger.debug("Sorting IDs") - self.ids.sort() - return self @override @@ -130,7 +153,7 @@ def __eq__(self, value: object) -> bool: def __hash__(self) -> int: if self.ids: - return hash(tuple(self.ids)) + return hash(tuple(sorted(self.ids))) return hash(self.id_) @@ -142,13 +165,29 @@ class _PopInputKobold(PopInput): """The max amount of context to submit to this AI for sampling.""" softprompts: list[str] | None = Field(default=None) """The available softprompt files on this worker for the currently running model.""" + extra_slow_worker: bool = Field(default=False) + """If this worker is extra slow and requires a longer timeout. + + Note that this reduces the pool of jobs available to this worker. + """ +@Unhashable +@Unequatable class TextGenerateJobPopRequest( BaseAIHordeRequest, APIKeyAllowedInRequestMixin, _PopInputKobold, ): + """Request additional jobs, if any are available, for a text worker. + + This is the key request type for all text workers as it contains all the parameters needed to request a job. + + Represents a POST request to the /v2/generate/text/pop endpoint. 
+ + v2 API Model: `PopInputKobold` + """ + @override @classmethod def get_api_model_name(cls) -> str | None: diff --git a/horde_sdk/ai_horde_api/apimodels/generate/text/_status.py b/horde_sdk/ai_horde_api/apimodels/generate/text/status.py similarity index 78% rename from horde_sdk/ai_horde_api/apimodels/generate/text/_status.py rename to horde_sdk/ai_horde_api/apimodels/generate/text/status.py index eaf3094a..94b159f4 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/text/_status.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/text/status.py @@ -1,19 +1,24 @@ import uuid +from typing import override from loguru import logger from pydantic import Field, field_validator -from typing_extensions import override from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest, GenMetadataEntry, JobRequestMixin -from horde_sdk.ai_horde_api.apimodels.generate._progress import ResponseGenerationProgressInfoMixin -from horde_sdk.ai_horde_api.apimodels.generate._status import Generation +from horde_sdk.ai_horde_api.apimodels.generate.progress import ResponseGenerationProgressInfoMixin +from horde_sdk.ai_horde_api.apimodels.generate.status import Generation from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH -from horde_sdk.ai_horde_api.fields import JobID +from horde_sdk.ai_horde_api.fields import GenerationID from horde_sdk.consts import HTTPMethod from horde_sdk.generic_api.apimodels import HordeResponseBaseModel, ResponseWithProgressMixin class GenerationKobold(Generation): + """Represents a text generation, including its ID, seed, and text. + + v2 API Model: `GenerationKobold` + """ + id_: str | None = Field(default=None, title="Generation ID") """The ID for this generation.""" gen_metadata: list[GenMetadataEntry] | None = None # FIXME: API declares a `GenerationMetadataKobold` here @@ -29,10 +34,11 @@ def get_api_model_name(self) -> str | None: return "GenerationKobold" @field_validator("id_", mode="before") - def validate_id(cls, v: str | JobID) -> JobID | str: + def validate_id(cls, v: str | GenerationID) -> GenerationID | str: + """Validate the ID is not an empty string.""" if isinstance(v, str) and v == "": logger.warning("Job ID is empty") - return JobID(root=uuid.uuid4()) + return GenerationID(root=uuid.uuid4()) return v @@ -50,6 +56,15 @@ class TextGenerateStatusResponse( ResponseWithProgressMixin, ResponseGenerationProgressInfoMixin, ): + """The current status of a text generation request and the data if it is complete. + + Represents the data returned from the following endpoints and http status codes: + - /v2/generate/text/status/{id} | TextGenerateStatusRequest [GET] -> 200 + - /v2/generate/text/status/{id} | DeleteTextGenerateRequest [DELETE] -> 200 + + v2 API Model: `RequestStatusKobold` + """ + generations: list[GenerationKobold] = Field( default_factory=list, title="Generations", @@ -92,7 +107,10 @@ class DeleteTextGenerateRequest( BaseAIHordeRequest, JobRequestMixin, ): - """Represents a DELETE request to the `/v2/generate/text/status/{id}` endpoint.""" + """Request to cancel a text generation by ID. + + Represents a DELETE request to the /v2/generate/text/status/{id} endpoint. + """ @override @classmethod @@ -127,7 +145,10 @@ def __hash__(self) -> int: class TextGenerateStatusRequest(BaseAIHordeRequest, JobRequestMixin): - """Represents a GET request to the `/v2/generate/status/{id}` endpoint.""" + """Request the status of a text generation by ID. + + Represents a GET request to the /v2/generate/text/status/{id} endpoint. 
+ """ @override @classmethod diff --git a/horde_sdk/ai_horde_api/apimodels/generate/text/_submit.py b/horde_sdk/ai_horde_api/apimodels/generate/text/submit.py similarity index 81% rename from horde_sdk/ai_horde_api/apimodels/generate/text/_submit.py rename to horde_sdk/ai_horde_api/apimodels/generate/text/submit.py index fd243135..6ad5c2f8 100644 --- a/horde_sdk/ai_horde_api/apimodels/generate/text/_submit.py +++ b/horde_sdk/ai_horde_api/apimodels/generate/text/submit.py @@ -1,8 +1,9 @@ from __future__ import annotations +from typing import override + from loguru import logger from pydantic import model_validator -from typing_extensions import override from horde_sdk.ai_horde_api.apimodels.base import ( BaseAIHordeRequest, @@ -14,16 +15,21 @@ from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH from horde_sdk.consts import HTTPMethod from horde_sdk.generic_api.apimodels import APIKeyAllowedInRequestMixin +from horde_sdk.generic_api.decoration import Unequatable, Unhashable +@Unhashable +@Unequatable class TextGenerationJobSubmitRequest( BaseAIHordeRequest, JobRequestMixin, APIKeyAllowedInRequestMixin, ): - """Represents the data needed to make a job submit 'request' from a worker to the /v2/generate/submit endpoint. + """Used when a worker submits a generation job. Includes metadata about the generation. + + Represents a POST request to the /v2/generate/text/submit endpoint. - v2 API Model: `SubmitInputStable` + v2 API Model: `SubmitInputKobold` """ generation: str = "" @@ -35,6 +41,7 @@ class TextGenerationJobSubmitRequest( @model_validator(mode="after") def validate_generation(self) -> TextGenerationJobSubmitRequest: + """Validate the generation field is not an empty string.""" if self.generation == "": logger.error("Generation cannot be an empty string.") logger.error(self.log_safe_model_dump()) diff --git a/horde_sdk/ai_horde_api/apimodels/kudos.py b/horde_sdk/ai_horde_api/apimodels/kudos.py new file mode 100644 index 00000000..e356543f --- /dev/null +++ b/horde_sdk/ai_horde_api/apimodels/kudos.py @@ -0,0 +1,126 @@ +from typing import override + +from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest +from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH +from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod +from horde_sdk.generic_api.apimodels import APIKeyAllowedInRequestMixin, HordeResponseBaseModel + + +class KudosTransferResponse(HordeResponseBaseModel): + """The transferred amount of Kudos. + + Represents the data returned from the /v2/kudos/transfer endpoint with http status code 200. + + v2 API Model: `KudosTransferred` + """ + + transferred: float | None = None + """The amount of Kudos transferred.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "KudosTransferred" + + +class KudosTransferRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request to transfer Kudos to another user. Be sure to use their entire username (e.g., db0#1). + + Represents a POST request to the /v2/kudos/transfer endpoint. + + v2 API Model: `_ANONYMOUS_MODEL` + """ + + username: str + """The username of the user to transfer Kudos to, including the '#'. 
For example, 'db0#1'.""" + amount: float + """The amount of Kudos to transfer.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return _ANONYMOUS_MODEL + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.POST + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_kudos_transfer + + @override + @classmethod + def get_default_success_response_type(cls) -> type[KudosTransferResponse]: + return KudosTransferResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class KudosAwardResponse(HordeResponseBaseModel): + """The awarded amount of Kudos. + + Represents the data returned from the /v2/kudos/award endpoint with http status code 200. + + v2 API Model: `KudosAwarded` + """ + + awarded: float | None = None + """The amount of Kudos awarded.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "KudosAwarded" + + +class KudosAwardRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request to award Kudos to another user. Be sure to use their entire username (e.g., db0#1). + + This is a privileged endpoint that requires an admin API key. + + Represents a POST request to the /v2/kudos/award endpoint. + + v2 API Model: `_ANONYMOUS_MODEL` + """ + + username: str + """The username of the user to award Kudos to.""" + amount: float + """The amount of Kudos to award.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return _ANONYMOUS_MODEL + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.POST + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_kudos_award + + @override + @classmethod + def get_default_success_response_type(cls) -> type[KudosAwardResponse]: + return KudosAwardResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True diff --git a/horde_sdk/ai_horde_api/apimodels/operations.py b/horde_sdk/ai_horde_api/apimodels/operations.py new file mode 100644 index 00000000..a05ae2d3 --- /dev/null +++ b/horde_sdk/ai_horde_api/apimodels/operations.py @@ -0,0 +1,370 @@ +"""Definitions for the admin operations.""" + +from typing import override + +from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest +from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH +from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod +from horde_sdk.generic_api.apimodels import ( + APIKeyAllowedInRequestMixin, + ContainsMessageResponseMixin, + HordeAPIObjectBaseModel, + HordeResponseBaseModel, + HordeResponseRootModel, +) +from horde_sdk.generic_api.decoration import Unequatable, Unhashable + + +class IPTimeout(HordeAPIObjectBaseModel): + """An IP address that is blocked and the number of seconds left in the timeout. + + v2 API Model: `IPTimeout` + """ + + ipaddr: str + """The IP address of the user to block.""" + seconds: int + """How many more seconds this IP block is in timeout.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "IPTimeout" + + +@Unhashable +@Unequatable +class IPTimeoutListResponse(HordeResponseRootModel[list[IPTimeout]]): + """A list of IP addresses that are blocked. 
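
# --- Illustrative sketch (not taken from this changeset) --------------------
# Transferring kudos with the new kudos.py models above.  As the docstring
# notes, the full username including the '#<id>' suffix is required.  The
# apikey field is assumed to come from APIKeyAllowedInRequestMixin.
from horde_sdk.ai_horde_api.apimodels.kudos import KudosTransferRequest

transfer = KudosTransferRequest(
    apikey="your-api-key-here",  # a registered (non-anonymous) key is needed to send kudos
    username="db0#1",
    amount=100.0,
)
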
+ + Represents the data returned from the following endpoints and http status codes: + - /v2/operations/ipaddr | SingleIPTimeoutsRequest [GET] -> 200 + - /v2/operations/ipaddr | AllIPTimeoutsRequest [GET] -> 200 + + v2 API Model: `_ANONYMOUS_MODEL` + """ + + root: list[IPTimeout] + """The underlying list of IP addresses that are blocked.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return _ANONYMOUS_MODEL + + +class AllIPTimeoutsRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """A request to get all IP addresses that are blocked. + + Represents a GET request to the /v2/operations/ipaddr endpoint. + """ + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_operations_ipaddr + + @override + @classmethod + def get_default_success_response_type(cls) -> type[IPTimeoutListResponse]: + return IPTimeoutListResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class SingleIPTimeoutsRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """A request to get a single IP address that is blocked. + + Represents a GET request to the /v2/operations/ipaddr endpoint. + """ + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_operations_ipaddr + + @override + @classmethod + def get_default_success_response_type(cls) -> type[IPTimeoutListResponse]: + return IPTimeoutListResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class BlockIPAddressResponse(HordeResponseBaseModel, ContainsMessageResponseMixin): + """Indicates that an IP address was successfully blocked. + + Represents the data returned from the /v2/operations/ipaddr endpoint with http status code 200. + + v2 API Model: `SimpleResponse` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "SimpleResponse" + + +class BlockIPAddressRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Establish a timeout for an IP address for a specified number of hours. + + Represents a POST request to the /v2/operations/ipaddr endpoint. + + v2 API Model: `AddTimeoutIPInput` + """ + + ipaddr: str + """The IP address of the user to block.""" + + hours: int + """The number of hours to block the IP address for.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "AddTimeoutIPInput" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.POST + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_operations_ipaddr + + @override + @classmethod + def get_default_success_response_type(cls) -> type[BlockIPAddressResponse]: + return BlockIPAddressResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class DeleteIPAddressResponse( + HordeResponseBaseModel, + ContainsMessageResponseMixin, +): + """Indicates that an IP address was successfully unblocked. 
+ + Represents the data returned from the /v2/operations/ipaddr endpoint with http status code 200. + + v2 API Model: `SimpleResponse` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "SimpleResponse" + + +class DeleteIPAddressRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Remove a timeout for an IP address. + + Represents a DELETE request to the /v2/operations/ipaddr endpoint. + + v2 API Model: `DeleteTimeoutIPInput` + """ + + ipaddr: str + """The IP address of the user to unblock.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "DeleteTimeoutIPInput" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.DELETE + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_operations_ipaddr + + @override + @classmethod + def get_default_success_response_type(cls) -> type[DeleteIPAddressResponse]: + return DeleteIPAddressResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class BlockWorkerIPAddressResponse( + HordeResponseBaseModel, + ContainsMessageResponseMixin, +): + """Indicates that a worker's IP address was successfully blocked. + + Represents the data returned from the /v2/operations/block_worker_ipaddr/{worker_id} endpoint with http status + code 200. + + v2 API Model: `SimpleResponse` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "SimpleResponse" + + +class BlockWorkerIPAddressRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request to block a worker's IP address for a specified number of days. + + Represents a PUT request to the /v2/operations/block_worker_ipaddr/{worker_id} endpoint. + + v2 API Model: `AddWorkerTimeout` + """ + + worker_id: str + """The ID of the worker to block.""" + + days: int + """The number of days to block the worker IP address for.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "AddWorkerTimeout" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.PUT + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_operations_block_worker_ipaddr_single + + @override + @classmethod + def get_default_success_response_type(cls) -> type[BlockWorkerIPAddressResponse]: + return BlockWorkerIPAddressResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class DeleteWorkerIPAddressResponse( + HordeResponseBaseModel, + ContainsMessageResponseMixin, +): + """Indicates that a worker's IP address was successfully unblocked. + + Represents the data returned from the /v2/operations/block_worker_ipaddr/{worker_id} endpoint with http status + code 200. + + v2 API Model: `SimpleResponse` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "SimpleResponse" + + +class DeleteWorkerIPAddressRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request to unblock a worker's IP address. + + Represents a DELETE request to the /v2/operations/block_worker_ipaddr/{worker_id} endpoint. 
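
# --- Illustrative sketch (not taken from this changeset) --------------------
# Moderation calls from the new operations.py module above.  Both endpoints
# require a privileged API key; the address, worker ID, and key below are
# placeholders, and the apikey field is assumed from APIKeyAllowedInRequestMixin.
from horde_sdk.ai_horde_api.apimodels.operations import (
    BlockIPAddressRequest,
    BlockWorkerIPAddressRequest,
)

ip_timeout = BlockIPAddressRequest(
    apikey="moderator-api-key",
    ipaddr="203.0.113.5",
    hours=24,
)
worker_timeout = BlockWorkerIPAddressRequest(
    apikey="moderator-api-key",
    worker_id="00000000-0000-0000-0000-000000000000",
    days=7,
)
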
+ """ + + worker_id: str + """The ID of the worker to unblock.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.DELETE + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_operations_block_worker_ipaddr_single + + @override + @classmethod + def get_default_success_response_type(cls) -> type[DeleteWorkerIPAddressResponse]: + return DeleteWorkerIPAddressResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +__all__ = [ + "AllIPTimeoutsRequest", + "BlockIPAddressRequest", + "BlockIPAddressResponse", + "BlockWorkerIPAddressRequest", + "BlockWorkerIPAddressResponse", + "DeleteIPAddressRequest", + "DeleteIPAddressResponse", + "DeleteWorkerIPAddressRequest", + "DeleteWorkerIPAddressResponse", + "IPTimeout", + "IPTimeoutListResponse", + "SingleIPTimeoutsRequest", +] diff --git a/horde_sdk/ai_horde_api/apimodels/sharedkeys.py b/horde_sdk/ai_horde_api/apimodels/sharedkeys.py new file mode 100644 index 00000000..e6e2592e --- /dev/null +++ b/horde_sdk/ai_horde_api/apimodels/sharedkeys.py @@ -0,0 +1,269 @@ +from typing import override + +from pydantic import Field, field_validator + +from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest, MessageSpecifiesSharedKeyMixin +from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH +from horde_sdk.consts import _OVERLOADED_MODEL, HTTPMethod +from horde_sdk.generic_api.apimodels import ( + APIKeyAllowedInRequestMixin, + ContainsMessageResponseMixin, + HordeAPIObjectBaseModel, + HordeResponseBaseModel, +) + + +class _BaseSharedKeySettings(HordeAPIObjectBaseModel): + kudos: int + """The Kudos limit assigned to this key.""" + name: str + """The Shared Key Name.""" + max_image_pixels: int + """The maximum amount of image pixels this key can generate per job. -1 means unlimited.""" + max_image_steps: int + """The maximum amount of image steps this key can use per job. -1 means unlimited.""" + max_text_tokens: int + """The maximum amount of text tokens this key can generate per job. -1 means unlimited.""" + + @field_validator("max_image_pixels", "max_image_steps", "max_text_tokens", mode="before") + @classmethod + def validate_restriction_values(cls, v: int) -> int: + """Validate the restriction values. + + Args: + v (int): The restriction value. + + Raises: + ValueError: If the restriction value is invalid. + + Returns: + int: The restriction value. + """ + if v < -1: + raise ValueError("Restriction values must be -1 or greater.") + return v + + +class SharedKeySettings(_BaseSharedKeySettings): + """Represents the settings for a SharedKey. + + v2 API Model: `SharedKeyInput` + """ + + expiry: int = Field(default=-1, ge=-1) + """The number of days until this key expires. -1 means never expires.""" + + +class SharedKeyDetailsResponse(HordeResponseBaseModel, MessageSpecifiesSharedKeyMixin, SharedKeySettings): + """Information about a SharedKey, including its creating user, settings and utilization. + + The data returned in this response can vary depending on the user's permissions, (creator, owner, or admin). 
+ + Represents the data returned from the following endpoints and http status codes: + - /v2/sharedkeys/{sharedkey_id} | SharedKeyModifyRequest [PATCH] -> 200 + - /v2/sharedkeys/{sharedkey_id} | SharedKeyDetailsRequest [GET] -> 200 + - /v2/sharedkeys | SharedKeyCreateRequest [PUT] -> 200 + + v2 API Model: `SharedKeyDetails` + """ + + username: str + """The owning user's unique Username. It is a combination of their chosen alias plus their ID.""" + utilized: int + """How much kudos has been utilized via this shared key until now.""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "SharedKeyDetails" + + @field_validator("max_image_pixels", "max_image_steps", "max_text_tokens", mode="before") + @classmethod + def validate_restriction_values(cls, v: int) -> int: + """Validate the restriction values. + + Args: + v (int): The restriction value. + + Raises: + ValueError: If the restriction value is invalid. + + Returns: + int: The restriction value. + """ + if v < -1: + raise ValueError("Restriction values must be -1 or greater.") + return v + + +class ExpiryStrSharedKeyDetailsResponse( + HordeResponseBaseModel, + MessageSpecifiesSharedKeyMixin, + _BaseSharedKeySettings, +): + """The shared key details for a style. + + Represents the data returned from the following endpoints and http status codes: + - /v2/sharedkeys/{sharedkey_id} | SharedKeyModifyRequest [PATCH] -> 200 + - /v2/sharedkeys/{sharedkey_id} | SharedKeyDetailsRequest [GET] -> 200 + - /v2/sharedkeys | SharedKeyCreateRequest [PUT] -> 200 + + v2 API Model: `_MODEL_OVERLOADED` + """ + + expiry: str | None = None # FIXME - duplicated in SharedKeyDetailsResponse due to overloaded model + """The expiry date of the shared key.""" + username: str # FIXME - duplicated in SharedKeyDetailsResponse due to overloaded model + """The owning user's unique Username. It is a combination of their chosen alias plus their ID.""" + utilized: int # FIXME - duplicated in SharedKeyDetailsResponse due to overloaded model + """How much kudos has been utilized via this shared key until now.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return _OVERLOADED_MODEL + + +class SharedKeyDetailsRequest(BaseAIHordeRequest, MessageSpecifiesSharedKeyMixin): + """Request the details of a SharedKey, including its creating user, settings, and utilization. + + The response data can vary depending on the user's permissions, (e.g., if they are a creator, owner, or admin). + + Represents a GET request to the /v2/sharedkeys/{sharedkey_id} endpoint. + """ + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_sharedkeys + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ExpiryStrSharedKeyDetailsResponse]: + return ExpiryStrSharedKeyDetailsResponse + + +class SharedKeyDeleteResponse(HordeResponseBaseModel, ContainsMessageResponseMixin): + """Indicates that a SharedKey was successfully deleted. + + Represents the data returned from the /v2/sharedkeys/{sharedkey_id} endpoint with http status code 200. 
+ + v2 API Model: `SimpleResponse` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "SimpleResponse" + + +class SharedKeyDeleteRequest( + BaseAIHordeRequest, + MessageSpecifiesSharedKeyMixin, + APIKeyAllowedInRequestMixin, +): + """Request to delete a SharedKey. + + This is a privileged operation that requires the user to be the owner, a moderator, or an admin. + + Represents a DELETE request to the /v2/sharedkeys/{sharedkey_id} endpoint. + """ + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.DELETE + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_sharedkeys + + @override + @classmethod + def get_default_success_response_type(cls) -> type[SharedKeyDeleteResponse]: + return SharedKeyDeleteResponse + + +class SharedKeyModifyRequest( + BaseAIHordeRequest, + SharedKeySettings, + MessageSpecifiesSharedKeyMixin, + APIKeyAllowedInRequestMixin, +): + """Request to modify a SharedKey. + + This is a privileged operation that requires the user to be the owner, a moderator, or an admin. + + Represents a PATCH request to the /v2/sharedkeys/{sharedkey_id} endpoint. + + v2 API Model: `SharedKeyInput` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "SharedKeyInput" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.PATCH + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_sharedkeys + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ExpiryStrSharedKeyDetailsResponse]: + return ExpiryStrSharedKeyDetailsResponse + + +class SharedKeyCreateRequest( + BaseAIHordeRequest, + SharedKeySettings, + APIKeyAllowedInRequestMixin, +): + """Request to create a new SharedKey. + + Represents a PUT request to the /v2/sharedkeys endpoint. 
+ + v2 API Model: `SharedKeyInput` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "SharedKeyInput" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.PUT + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_sharedkeys_create + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ExpiryStrSharedKeyDetailsResponse]: + return ExpiryStrSharedKeyDetailsResponse diff --git a/horde_sdk/ai_horde_api/apimodels/_stats.py b/horde_sdk/ai_horde_api/apimodels/stats.py similarity index 85% rename from horde_sdk/ai_horde_api/apimodels/_stats.py rename to horde_sdk/ai_horde_api/apimodels/stats.py index 0a8876f3..eaccdc23 100644 --- a/horde_sdk/ai_horde_api/apimodels/_stats.py +++ b/horde_sdk/ai_horde_api/apimodels/stats.py @@ -1,9 +1,9 @@ from enum import auto +from typing import override from loguru import logger -from pydantic import ConfigDict, Field, field_validator +from pydantic import Field, field_validator from strenum import StrEnum -from typing_extensions import override from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest from horde_sdk.ai_horde_api.consts import MODEL_STATE @@ -14,6 +14,8 @@ class StatsModelsTimeframe(StrEnum): + """A timeframe for stats data.""" + day = auto() month = auto() total = auto() @@ -22,7 +24,9 @@ class StatsModelsTimeframe(StrEnum): @Unequatable @Unhashable class ImageStatsModelsResponse(HordeResponseBaseModel): - """Represents the data returned from the `/v2/stats/img/models` endpoint. + """Statistics for image model usage. + + Represents the data returned from the /v2/stats/img/models endpoint with http status code 200. v2 API Model: `ImgModelStats` """ @@ -90,14 +94,14 @@ def get_timeframe(self, timeframe: StatsModelsTimeframe) -> dict[str, int]: class ImageStatsModelsRequest(BaseAIHordeRequest): - """Represents the data needed to make a request to the `/v2/stats/img/models` endpoint.""" + """Request the usage statistics for image models. - model_config = ConfigDict( - protected_namespaces=(), # Allows the "model_" prefix on attrs - ) + Represents a GET request to the /v2/stats/img/models endpoint. + """ - model_state: MODEL_STATE = Field( + api_model_state: MODEL_STATE = Field( default=MODEL_STATE.all, + alias="model_state", ) """The state of the models to get stats for. Known models are models that are known to the system.""" @@ -152,7 +156,12 @@ def get_api_model_name(cls) -> str | None: class ImageStatsModelsTotalResponse(HordeResponseBaseModel): - """Represents the data returned from the `/v2/stats/img/totals` endpoint.""" + """Summary statistics for all image models. + + Represents the data returned from the /v2/stats/img/totals endpoint with http status code 200. + + v2 API Model: `StatsImgTotals` + """ day: SinglePeriodImgStat | None = None """The total stats for the past day.""" @@ -172,7 +181,10 @@ def get_api_model_name(cls) -> str | None: class ImageStatsModelsTotalRequest(BaseAIHordeRequest): - """Represents the data needed to make a request to the `/v2/stats/img/totals` endpoint.""" + """Request summary usage statistics across all image models. + + Represents a GET request to the /v2/stats/img/totals endpoint. 
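
# --- Illustrative sketch (not taken from this changeset) --------------------
# The stats request field is renamed to `api_model_state` above but keeps the
# wire name via alias="model_state", so the alias spelling still constructs the
# request; the field-name spelling additionally assumes the SDK's usual
# populate-by-name model config.  MODEL_STATE.all is the documented default,
# and the import path follows the rename to stats.py in this diff.
from horde_sdk.ai_horde_api.apimodels.stats import ImageStatsModelsRequest
from horde_sdk.ai_horde_api.consts import MODEL_STATE

stats_by_alias = ImageStatsModelsRequest(model_state=MODEL_STATE.all)
stats_by_name = ImageStatsModelsRequest(api_model_state=MODEL_STATE.all)
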
+ """ @override @classmethod @@ -197,7 +209,12 @@ def get_default_success_response_type(cls) -> type[ImageStatsModelsTotalResponse @Unhashable class TextStatsModelResponse(HordeResponseBaseModel): - """Represents the data returned from the `/v2/stats/text/models` endpoint.""" + """Statistics for text model usage. + + Represents the data returned from the /v2/stats/text/models endpoint with http status code 200. + + v2 API Model: `TxtModelStats` + """ day: dict[str, int] """The stats for the past day.""" @@ -240,7 +257,10 @@ def get_api_model_name(cls) -> str | None: class TextStatsModelsRequest(BaseAIHordeRequest): - """Represents the data needed to make a request to the `/v2/stats/text/models` endpoint.""" + """Request the usage statistics for text models. + + Represents a GET request to the /v2/stats/text/models endpoint. + """ @override @classmethod @@ -286,7 +306,12 @@ def get_api_model_name(cls) -> str | None: @Unhashable class TextStatsModelsTotalResponse(HordeResponseBaseModel): - """Represents the data returned from the `/v2/stats/text/totals` endpoint.""" + """Summary statistics for all text models. + + Represents the data returned from the /v2/stats/text/totals endpoint with http status code 200. + + v2 API Model: `StatsTxtTotals` + """ minute: dict[str, int] """The total stats for the past minute.""" @@ -306,7 +331,10 @@ def get_api_model_name(cls) -> str | None: class TextStatsModelsTotalRequest(BaseAIHordeRequest): - """Represents the data needed to make a request to the `/v2/stats/text/totals` endpoint.""" + """Summary usage statistics across all text models. + + Represents a GET request to the /v2/stats/text/totals endpoint. + """ @override @classmethod diff --git a/horde_sdk/ai_horde_api/apimodels/_status.py b/horde_sdk/ai_horde_api/apimodels/status.py similarity index 81% rename from horde_sdk/ai_horde_api/apimodels/_status.py rename to horde_sdk/ai_horde_api/apimodels/status.py index f5790b0b..245c040c 100644 --- a/horde_sdk/ai_horde_api/apimodels/_status.py +++ b/horde_sdk/ai_horde_api/apimodels/status.py @@ -1,32 +1,46 @@ from collections.abc import Iterator +from typing import override -from pydantic import ConfigDict, Field -from typing_extensions import override +from pydantic import Field -from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest +from horde_sdk.ai_horde_api.apimodels.base import ActiveModel, BaseAIHordeRequest from horde_sdk.ai_horde_api.consts import MODEL_STATE, MODEL_TYPE from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH -from horde_sdk.consts import HTTPMethod +from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod from horde_sdk.generic_api.apimodels import ( ContainsMessageResponseMixin, HordeAPIObjectBaseModel, HordeResponseBaseModel, HordeResponseRootModel, ) -from horde_sdk.generic_api.decoration import Unhashable +from horde_sdk.generic_api.decoration import Unequatable, Unhashable class AIHordeHeartbeatResponse(HordeResponseBaseModel, ContainsMessageResponseMixin): + """Returns the status of the AI Horde API and a message if present. + + Represents the data returned from the /v2/status/heartbeat endpoint with http status code 200. 
+ + v2 API Model: `_ANONYMOUS_MODEL` + """ + version: str """The version of the AI Horde API that this node is running.""" @override @classmethod def get_api_model_name(cls) -> str | None: - return None + return _ANONYMOUS_MODEL class AIHordeHeartbeatRequest(BaseAIHordeRequest): + """Request a heartbeat from the AI Horde API, suitable for checking if the API is up and running. + + These requests may also return other meta information, such as the version of the API. + + Represents a GET request to the /v2/status/heartbeat endpoint. + """ + @override @classmethod def get_api_model_name(cls) -> str | None: @@ -49,6 +63,13 @@ def get_default_success_response_type(cls) -> type[AIHordeHeartbeatResponse]: class HordePerformanceResponse(HordeResponseBaseModel): + """Information about the performance of the horde, such as worker counts and queue sizes. + + Represents the data returned from the /v2/status/performance endpoint with http status code 200. + + v2 API Model: `HordePerformance` + """ + interrogator_count: int | None = Field( default=None, description=( @@ -127,6 +148,11 @@ def get_api_model_name(cls) -> str | None: class HordePerformanceRequest(BaseAIHordeRequest): + """Request performance information about the horde, such as worker counts and queue sizes. + + Represents a GET request to the /v2/status/performance endpoint. + """ + @override @classmethod def get_api_model_name(cls) -> str | None: @@ -149,6 +175,11 @@ def get_default_success_response_type(cls) -> type[HordePerformanceResponse]: class Newspiece(HordeAPIObjectBaseModel): + """A piece of news from the horde, such as updates or other news from the AI-Horde team. + + v2 API Model: `Newspiece` + """ + date_published: str | None = Field( default=None, ) @@ -179,7 +210,13 @@ def get_api_model_name(cls) -> str | None: @Unhashable +@Unequatable class NewsResponse(HordeResponseRootModel[list[Newspiece]]): + """A list of newspieces from the horde, which are updates or other news from the AI-Horde team. + + Represents the data returned from the /v2/status/news endpoint with http status code 200. + """ + root: list[Newspiece] """The underlying list of newspieces.""" @@ -201,6 +238,11 @@ def __eq__(self, other: object) -> bool: class NewsRequest(BaseAIHordeRequest): + """Request news from the horde, such as updates or other news from the AI-Horde team. + + Represents a GET request to the /v2/status/news endpoint. 
+ """ + @override @classmethod def get_api_model_name(cls) -> str | None: @@ -222,53 +264,14 @@ def get_default_success_response_type(cls) -> type[NewsResponse]: return NewsResponse -class ActiveModelLite(HordeAPIObjectBaseModel): - count: int | None = Field( - default=None, - ) - """How many of workers in this horde are running this model.""" - name: str | None = Field( - default=None, - ) - """The Name of a model available by workers in this horde.""" - - @override - @classmethod - def get_api_model_name(cls) -> str | None: - return "ActiveModelLite" - - -class ActiveModel(ActiveModelLite): - eta: int | None = Field( - default=None, - ) - """Estimated time in seconds for this model's queue to be cleared.""" - jobs: float | None = Field( - default=None, - ) - """The job count waiting to be generated by this model.""" - performance: float | None = Field( - default=None, - ) - """The average speed of generation for this model.""" - queued: float | None = Field( - default=None, - ) - """The amount waiting to be generated by this model.""" - type_: MODEL_TYPE | None = Field( - examples=[MODEL_TYPE.image, MODEL_TYPE.text], - alias="type", - ) - """The model type (text or image).""" - - @override - @classmethod - def get_api_model_name(cls) -> str | None: - return "ActiveModel" - - @Unhashable +@Unequatable class HordeStatusModelsAllResponse(HordeResponseRootModel[list[ActiveModel]]): + """A list of details about active models in the horde. + + Represents the data returned from the /v2/status/models endpoint with http status code 200. + """ + root: list[ActiveModel] """The underlying list of models.""" @@ -290,9 +293,10 @@ def __eq__(self, other: object) -> bool: class HordeStatusModelsAllRequest(BaseAIHordeRequest): - model_config = ConfigDict( - protected_namespaces=(), # Allows the "model_" prefix on attrs - ) + """Request details about models in the horde. + + Represents a GET request to the /v2/status/models endpoint. + """ type_: MODEL_TYPE = Field( default=MODEL_TYPE.image, @@ -337,7 +341,13 @@ def get_query_fields(cls) -> list[str]: @Unhashable +@Unequatable class HordeStatusModelsSingleResponse(HordeResponseRootModel[list[ActiveModel]]): + """A list of details about a single active model in the horde. + + Represents the data returned from the /v2/status/models/{model_name} endpoint with http status code 200. + """ + # This is a list because of an oversight in the structure of the API response. # FIXME root: list[ActiveModel] @@ -361,9 +371,10 @@ def __eq__(self, other: object) -> bool: class HordeStatusModelsSingleRequest(BaseAIHordeRequest): - model_config = ConfigDict( - protected_namespaces=(), # Allows the "model_" prefix on attrs - ) + """Request details about a single model in the horde by its name. + + Represents a GET request to the /v2/status/models/{model_name} endpoint. + """ model_name: str """The name of the model to request.""" @@ -390,6 +401,11 @@ def get_default_success_response_type(cls) -> type[HordeStatusModelsSingleRespon class HordeModes(HordeAPIObjectBaseModel): + """The current modes of the horde, such as maintenance mode, invite-only mode, and raid mode. 
+ + v2 API Model: `HordeModes` + """ + maintenance_mode: bool = Field( default=False, ) diff --git a/horde_sdk/ai_horde_api/apimodels/styles.py b/horde_sdk/ai_horde_api/apimodels/styles.py new file mode 100644 index 00000000..a110675e --- /dev/null +++ b/horde_sdk/ai_horde_api/apimodels/styles.py @@ -0,0 +1,965 @@ +from enum import auto +from typing import Literal, override + +from pydantic import Field +from strenum import StrEnum + +from horde_sdk.ai_horde_api.apimodels.base import ( + BaseAIHordeRequest, + _BaseImageGenerateParamMixin, +) +from horde_sdk.ai_horde_api.apimodels.generate.text.async_ import _BasePayloadKoboldMixin +from horde_sdk.ai_horde_api.apimodels.sharedkeys import ( + ExpiryStrSharedKeyDetailsResponse, +) +from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH +from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod +from horde_sdk.generic_api.apimodels import ( + APIKeyAllowedInRequestMixin, + ContainsMessageResponseMixin, + ContainsWarningsResponseMixin, + HordeAPIObjectBaseModel, + HordeResponseBaseModel, + HordeResponseRootModel, +) +from horde_sdk.generic_api.decoration import Unequatable, Unhashable + + +class StyleType(StrEnum): + """An enum representing the different types of styles.""" + + image = auto() + text = auto() + + +class ResponseModelStylesUser(HordeAPIObjectBaseModel): + """Represents a style created by a user. + + v2 API Model: `ResponseModelStylesUser` + """ + + name: str + """The name of the style.""" + id_: str = Field(alias="id") + """The ID of the style.""" + type_: StyleType = Field(alias="type") + """The type of the style.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "ResponseModelStylesUser" + + +class StyleExample(HordeAPIObjectBaseModel): + """Represents an example of an image generated by a style. + + v2 API Model: `StyleExample` + """ + + url: str = Field( + examples=[ + "https://lemmy.dbzer0.com/pictrs/image/c9915186-ca30-4f5a-873c-a91287fb4419.webp", + ], + ) + """The URL of the image generated by this style.""" + + primary: bool = False + """When true this image is to be used as the primary example for this style.""" + + id_: str = Field(alias="id") + """The UUID of this example.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "StyleExample" + + +class ModelStyleInputParamsStable(_BaseImageGenerateParamMixin): + """The default parameters to use for all generations using a particular style. + + v2 API Model: `ModelStyleInputParamsStable` + """ + + steps: int = Field( + default=20, + examples=[ + 20, + ], + ) + """The number of steps to use for the generation.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "ModelStyleInputParamsStable" + + +@Unhashable +@Unequatable +class _StyleMixin(HordeAPIObjectBaseModel): + """A mixin representing the common fields of a style.""" + + name: str + """The name of the style.""" + + info: str | None = Field( + default=None, + examples=[ + "photorealism excellence.", + ], + ) + """Extra information or comments about this style provided by its creator.""" + + prompt: str + """The prompt template which will be sent to generate an image. + + The user's prompt will be injected into this. This argument MUST include a '{p}' which specifies the part where \ + the user's prompt will be injected and an '{np}' where the user's negative prompt will be injected (if any)""" + + public: bool = True + """When true this style will be listed among all styles publicly. 
+ + When false, information about this style can only be seen by people who know its ID or name.""" + + nsfw: bool = False + """When true, it signifies this style is expected to generate NSFW images primarily.""" + + tags: list[str] | None = Field( + default=None, + examples=[ + "photorealistic", + ], + ) + """Tags associated with this style.""" + + models: list[str] | None = None + """The models which this style will attempt to use.""" + + +class _StyleResponseMixin(_StyleMixin): + """A mixin representing the common fields of a style endpoint response.""" + + id_: str = Field(alias="id") + """The UUID of the style. Use this to use the style or retrieve its information in the future.""" + + creator: str | None = Field( + default=None, + examples=["db0#1"], + ) + """The alias of the user which created this style.""" + + use_count: int | None = None + """The amount of times this style has been used in generations.""" + + sharedkey: str | None = Field( + default=None, + examples=[ + "00000000-0000-0000-0000-000000000000", + ], + min_length=36, + max_length=36, + ) + """The UUID of a shared key which will be used to fulfil this style when active.""" + + +@Unhashable +@Unequatable +class StyleStable(HordeResponseBaseModel, _StyleResponseMixin): + """The details of a style, including its parameters and examples. + + Represents the data returned from the following endpoints and http status codes: + - /v2/styles/image_by_name/{style_name} | SingleStyleImageByNameRequest [GET] -> 200 + - /v2/styles/image/{style_id} | SingleStyleImageByIDRequest [GET] -> 200 + + v2 API Model: `StyleStable` + """ + + params: ModelStyleInputParamsStable | None = None + """The parameters to use for all generations using this style, if not set by the user.""" + + examples: list[StyleExample] | None = None + """A list of examples of images generated by this style.""" + shared_key: ExpiryStrSharedKeyDetailsResponse | None = None + """The shared key backing this style, if any.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "StyleStable" + + +@Unhashable +@Unequatable +class AllStylesImageResponse(HordeResponseRootModel[list[StyleStable]]): + """A list of styles. + + Represents the data returned from the /v2/styles/image endpoint with http status code 200. + + v2 API Model: `_ANONYMOUS_MODEL` + """ + + root: list[StyleStable] + """The underlying list of styles.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return _ANONYMOUS_MODEL + + +class AllStylesImageRequest( + BaseAIHordeRequest, +): + """Request to get image styles. Use `page` to paginate through the results. + + Represents a GET request to the /v2/styles/image endpoint. + + v2 API Model: `_ANONYMOUS_MODEL` + """ + + sort: Literal["popular", "age"] = "popular" + """The sort order of the styles.""" + + page: int = 1 + """The page of styles to retrieve.
Each page has 25 styles.""" + + tag: str | None = None + """If specified, return only styles with this tag.""" + + model: str | None = None + """If specified, return only styles which use this model.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return _ANONYMOUS_MODEL + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_query_fields(cls) -> list[str]: + return ["sort", "page", "tag", "model"] + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_styles_image + + @override + @classmethod + def get_default_success_response_type(cls) -> type[AllStylesImageResponse]: + return AllStylesImageResponse + + +class SingleStyleImageByIDRequest( + BaseAIHordeRequest, +): + """Request to get a single image style by its ID. + + Represents a GET request to the /v2/styles/image/{style_id} endpoint. + """ + + style_id: str + """The ID of the style to retrieve.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_styles_image_by_id + + @override + @classmethod + def get_default_success_response_type(cls) -> type[StyleStable]: + return StyleStable + + +class SingleStyleImageByNameRequest( + BaseAIHordeRequest, +): + """Request to get a single image style by its name. + + Represents a GET request to the /v2/styles/image_by_name/{style_name} endpoint. + """ + + style_name: str + """The name of the style to retrieve.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_styles_image_by_name + + @override + @classmethod + def get_default_success_response_type(cls) -> type[StyleStable]: + return StyleStable + + +class ModifyStyleImageResponse( + HordeResponseBaseModel, + ContainsMessageResponseMixin, + ContainsWarningsResponseMixin, +): + """The response to modifying an image style, including any warnings. + + Represents the data returned from the following endpoints and http status codes: + - /v2/styles/image/{style_id} | ModifyStyleImageRequest [PATCH] -> 200 + - /v2/styles/image | CreateStyleImageRequest [POST] -> 200 + + v2 API Model: `StyleModify` + """ + + id_: str = Field(alias="id") + """The ID of the style.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "StyleModify" + + +class CreateStyleImageRequest( + _StyleMixin, + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Represents a POST request to the /v2/styles/image endpoint. 
+ + v2 API Model: `ModelStyleInputStable` + """ + + params: ModelStyleInputParamsStable + """The parameters to use for all generations using this style, if not set by the user.""" + + sharedkey: str | None = Field( + default=None, + examples=[ + "00000000-0000-0000-0000-000000000000", + ], + min_length=36, + max_length=36, + ) + """The UUID of a shared key which will be used to fulfil this style when active.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "ModelStyleInputStable" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.POST + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_styles_image + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ModifyStyleImageResponse]: + return ModifyStyleImageResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class ModifyStyleImageRequest( + _StyleMixin, + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Represents a PATCH request to the /v2/styles/image/{style_id} endpoint. + + v2 API Model: `ModelStylePatchStable` + """ + + style_id: str + """The ID of the style to modify.""" + + params: ModelStyleInputParamsStable + """The parameters to use for all generations using this style, if not set by the user.""" + + sharedkey: str | None = Field( + examples=["00000000-0000-0000-0000-000000000000"], + min_length=36, + max_length=36, + ) + """The UUID of a shared key which will be used to fulfil this style when active.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "ModelStylePatchStable" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.PATCH + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_styles_image_by_id + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ModifyStyleImageResponse]: + return ModifyStyleImageResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class DeleteStyleImageResponse( + HordeResponseBaseModel, + ContainsMessageResponseMixin, +): + """Indicates that a style was successfully deleted. + + Represents the data returned from the /v2/styles/image/{style_id} endpoint with http status code 200. + + v2 API Model: `SimpleResponse` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "SimpleResponse" + + +class DeleteStyleImageRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Represents a DELETE request to the /v2/styles/image/{style_id} endpoint.""" + + style_id: str + """The ID of the style to delete.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.DELETE + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_styles_image_by_id + + @override + @classmethod + def get_default_success_response_type(cls) -> type[DeleteStyleImageResponse]: + return DeleteStyleImageResponse + + +class StyleImageExampleModifyResponse( + HordeResponseBaseModel, + ContainsMessageResponseMixin, + ContainsWarningsResponseMixin, +): + """The response to modifying an image style example, including any warnings. 
+ + Represents the data returned from the following endpoints and http status codes: + - /v2/styles/image/{style_id}/example/{example_id} | StyleImageExampleModifyRequest [PATCH] -> 200 + - /v2/styles/image/{style_id}/example | StyleImageExampleAddRequest [POST] -> 200 + + v2 API Model: `StyleModify` + """ + + id_: str = Field(alias="id") + """The ID of the example.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "StyleModify" + + +class StyleImageExampleAddRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Represents a POST request to the /v2/styles/image/{style_id}/example endpoint. + + v2 API Model: `InputStyleExamplePost` + """ + + style_id: str + """The ID of the style to add the example to.""" + + url: str + """The URL of the image to add as an example.""" + + primary: bool = False + """When true this image is to be used as the primary example for this style.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "InputStyleExamplePost" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.POST + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_styles_image_example_by_style_id + + @override + @classmethod + def get_default_success_response_type(cls) -> type[StyleImageExampleModifyResponse]: + return StyleImageExampleModifyResponse + + +class StyleImageExampleDeleteResponse(HordeResponseBaseModel, ContainsMessageResponseMixin): + """Indicates that an example was successfully deleted. + + Represents the data returned from the /v2/styles/image/{style_id}/example/{example_id} endpoint with http status + code 200. + + v2 API Model: `SimpleResponse` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "SimpleResponse" + + +class StyleImageExampleDeleteRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Represents a DELETE request to the /v2/styles/image/{style_id}/example/{example_id} endpoint.""" + + style_id: str + """The ID of the style to delete the example from.""" + + example_id: str + """The ID of the example to delete.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.DELETE + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_styles_image_example_by_style_id_example_id + + @override + @classmethod + def get_default_success_response_type(cls) -> type[StyleImageExampleDeleteResponse]: + return StyleImageExampleDeleteResponse + + +class StyleImageExampleModifyRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Represents a PATCH request to the /v2/styles/image/{style_id}/example/{example_id} endpoint. 
+ + v2 API Model: `InputStyleExamplePost` + """ + + style_id: str + """The ID of the style to modify the example of.""" + + example_id: str + """The ID of the example to modify.""" + + url: str + """The URL of the image to add as an example.""" + + primary: bool = False + """When true this image is to be used as the primary example for this style.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "InputStyleExamplePost" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.PATCH + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_styles_image_example_by_style_id_example_id + + @override + @classmethod + def get_default_success_response_type(cls) -> type[StyleImageExampleModifyResponse]: + return StyleImageExampleModifyResponse + + +class ModelStyleInputParamsKobold(HordeResponseBaseModel, _BasePayloadKoboldMixin): + """The parameters that can be set for a text generation style. + + v2 API Model: `ModelStyleInputParamsKobold` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "ModelStyleInputParamsKobold" + + +class StyleKobold(HordeResponseBaseModel, _StyleResponseMixin): + """The details of a text style, including its parameters. + + Represents the data returned from the following endpoints and http status codes: + - /v2/styles/text_by_name/{style_name} | SingleStyleTextByNameRequest [GET] -> 200 + - /v2/styles/text/{style_id} | SingleStyleTextByIDRequest [GET] -> 200 + + v2 API Model: `StyleKobold` + """ + + params: ModelStyleInputParamsKobold | None = None + """The parameters to use for all generations using this style, if not set by the user.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "StyleKobold" + + +@Unhashable +@Unequatable +class AllStylesTextResponse(HordeResponseRootModel[list[StyleKobold]]): + """A list of text styles. + + Represents the data returned from the /v2/styles/text endpoint with http status code 200. + + v2 API Model: `_ANONYMOUS_MODEL` + """ + + root: list[StyleKobold] + """The underlying list of styles.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return _ANONYMOUS_MODEL + + +class AllStylesTextRequest( + BaseAIHordeRequest, +): + """Request to get text styles. Use `page` to paginate through the results. + + Represents a GET request to the /v2/styles/text endpoint. + """ + + sort: Literal["popular", "age"] = "popular" + """The sort order of the styles.""" + + page: int = 1 + """The page of styles to retrieve. Each page has 25 styles.""" + + tag: str | None = None + """If specified, return only styles with this tag.""" + + model: str | None = None + """If specified, return only styles which use this model.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_query_fields(cls) -> list[str]: + return ["sort", "page", "tag", "model"] + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_styles_text + + @override + @classmethod + def get_default_success_response_type(cls) -> type[AllStylesTextResponse]: + return AllStylesTextResponse + + +class SingleStyleTextByIDRequest( + BaseAIHordeRequest, +): + """Request to get a single text style by its ID.
+ + Represents a GET request to the /v2/styles/text/{style_id} endpoint. + """ + + style_id: str + """The ID of the style to retrieve.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_styles_text_by_id + + @override + @classmethod + def get_default_success_response_type(cls) -> type[StyleKobold]: + return StyleKobold + + +class SingleStyleTextByNameRequest( + BaseAIHordeRequest, +): + """Request to get a single text style by its name. + + Represents a GET request to the /v2/styles/text_by_name/{style_name} endpoint. + """ + + style_name: str + """The name of the style to retrieve.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_styles_text_by_name + + @override + @classmethod + def get_default_success_response_type(cls) -> type[StyleKobold]: + return StyleKobold + + +class ModifyStyleTextResponse( + HordeResponseBaseModel, + ContainsMessageResponseMixin, + ContainsWarningsResponseMixin, +): + """The response to modifying a text style, including any warnings. + + Represents the data returned from the following endpoints and http status codes: + - /v2/styles/text/{style_id} | ModifyStyleTextRequest [PATCH] -> 200 + - /v2/styles/text | CreateStyleTextRequest [POST] -> 200 + + v2 API Model: `StyleModify` + """ + + id_: str = Field(alias="id") + """The ID of the style.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "StyleModify" + + +class CreateStyleTextRequest( + BaseAIHordeRequest, + _StyleMixin, + APIKeyAllowedInRequestMixin, +): + """Request to create a new text style with the given parameters. + + Represents a POST request to the /v2/styles/text endpoint. + + v2 API Model: `ModelStyleInputKobold` + """ + + params: ModelStyleInputParamsKobold + """The parameters to use for all generations using this style, if not set by the user.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "ModelStyleInputKobold" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.POST + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_styles_text + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ModifyStyleTextResponse]: + return ModifyStyleTextResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class ModifyStyleTextRequest( + BaseAIHordeRequest, + _StyleMixin, + APIKeyAllowedInRequestMixin, +): + """Represents a PATCH request to the /v2/styles/text/{style_id} endpoint. 
+ + v2 API Model: `ModelStylePatchKobold` + """ + + style_id: str + """The ID of the style to modify.""" + + params: ModelStyleInputParamsKobold + """The parameters to use for all generations using this style, if not set by the user.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "ModelStylePatchKobold" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.PATCH + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_styles_text_by_id + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ModifyStyleTextResponse]: + return ModifyStyleTextResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class DeleteStyleTextResponse( + HordeResponseBaseModel, + ContainsMessageResponseMixin, +): + """Indicates that a style was successfully deleted. + + Represents the data returned from the /v2/styles/text/{style_id} endpoint with http status code 200. + + v2 API Model: `SimpleResponse` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "SimpleResponse" + + +class DeleteStyleTextRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request to delete a text style by its ID. + + Note that this is a privileged operation and requires the API key that created the style or + admin/moderator privileges. + + Represents a DELETE request to the /v2/styles/text/{style_id} endpoint. + """ + + style_id: str + """The ID of the style to delete.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.DELETE + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_styles_text_by_id + + @override + @classmethod + def get_default_success_response_type(cls) -> type[DeleteStyleTextResponse]: + return DeleteStyleTextResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True diff --git a/horde_sdk/ai_horde_api/apimodels/teams.py b/horde_sdk/ai_horde_api/apimodels/teams.py new file mode 100644 index 00000000..e230bb76 --- /dev/null +++ b/horde_sdk/ai_horde_api/apimodels/teams.py @@ -0,0 +1,322 @@ +from typing import override + +from pydantic import Field + +from horde_sdk.ai_horde_api.apimodels.base import ActiveModelLite, BaseAIHordeRequest +from horde_sdk.ai_horde_api.apimodels.workers.workers import TeamDetailsLite, WorkerDetailLite +from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH +from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod +from horde_sdk.generic_api.apimodels import ( + APIKeyAllowedInRequestMixin, + HordeAPIObjectBaseModel, + HordeResponseBaseModel, + HordeResponseRootModel, +) +from horde_sdk.generic_api.decoration import Unequatable, Unhashable + + +@Unhashable +@Unequatable +class TeamDetails(HordeResponseBaseModel, TeamDetailsLite): + """Details about a team, including the models and workers that are part of it. + + Represents the data returned from the /v2/teams/{team_id} endpoint with http status code 200. 
+ + v2 API Model: `TeamDetails` + """ + + info: str | None = Field( + default=None, + examples=[ + "Anarchy is emergent order.", + ], + ) + """Extra information or comments about this team provided by its owner.""" + creator: str | None = Field( + default=None, + examples=["db0#1"], + ) + """The alias of the user which created this team.""" + kudos: float | None = Field( + default=None, + ) + """How many Kudos the workers in this team have been rewarded while part of this team.""" + models: list[ActiveModelLite] | None = None + """The models that this team has run.""" + requests_fulfilled: int | None = Field( + default=None, + ) + """How many images this team's workers have generated.""" + uptime: int | None = Field( + default=None, + ) + """The total amount of time workers have stayed online while on this team.""" + worker_count: int | None = Field( + default=None, + examples=[10], + ) + """How many workers have been dedicated to this team.""" + workers: list[WorkerDetailLite] | None = None + """The workers that have been dedicated to this team.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "TeamDetails" + + +@Unhashable +@Unequatable +class AllTeamDetailsResponse(HordeResponseRootModel[list[TeamDetails]]): + """Details about all teams, including the models and workers that are part of them. + + Represents the data returned from the /v2/teams endpoint with http status code 200. + + v2 API Model: `_ANONYMOUS_MODEL` + """ + + root: list[TeamDetails] + """The underlying list of teams.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return _ANONYMOUS_MODEL + + +class AllTeamDetailsRequest( + BaseAIHordeRequest, +): + """Request to get details about all teams. + + Represents a GET request to the /v2/teams endpoint. + """ + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_teams_all + + @override + @classmethod + def get_default_success_response_type(cls) -> type[AllTeamDetailsResponse]: + return AllTeamDetailsResponse + + +class SingleTeamDetailsRequest( + BaseAIHordeRequest, +): + """Request to get details about a single team by ID. + + Represents a GET request to the /v2/teams/{team_id} endpoint. + """ + + team_id: str + """The ID of the team to get details for.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_teams_single + + @override + @classmethod + def get_default_success_response_type(cls) -> type[TeamDetails]: + return TeamDetails + + +class ModifyTeam(HordeResponseBaseModel): + """Details about a team that has been modified. 
+ + Represents the data returned from the following endpoints and http status codes: + - /v2/teams/{team_id} | ModifyTeamRequest [PATCH] -> 200 + - /v2/teams | CreateTeamRequest [POST] -> 200 + + v2 API Model: `ModifyTeam` + """ + + id_: str = Field(alias="id") + """The ID of the team.""" + name: str + """The name of the team.""" + info: str | None = Field( + default=None, + examples=[ + "Anarchy is emergent order.", + ], + ) + """Extra information or comments about this team provided by its owner.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "ModifyTeam" + + +class ModifyTeamInput(HordeAPIObjectBaseModel): + """Input data for modifying a team. + + v2 API Model: `ModifyTeamInput` + """ + + name: str + """The name of the team.""" + + info: str | None = Field( + default=None, + examples=[ + "Anarchy is emergent order.", + ], + ) + """Extra information or comments about this team provided by its owner.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "ModifyTeamInput" + + +class CreateTeamRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, + ModifyTeamInput, +): + """Request to create a new team. + + Represents a POST request to the /v2/teams endpoint. + + v2 API Model: `CreateTeamInput` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "CreateTeamInput" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.POST + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_teams_all + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ModifyTeam]: + return ModifyTeam + + +class ModifyTeamRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, + ModifyTeamInput, +): + """Request to modify a team by ID. + + Represents a PATCH request to the /v2/teams/{team_id} endpoint. + + v2 API Model: `ModifyTeamInput` + """ + + team_id: str + """The ID of the team to modify.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "ModifyTeamInput" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.PATCH + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_teams_single + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ModifyTeam]: + return ModifyTeam + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class DeleteTeamResponse(HordeResponseBaseModel): + """The team id and name that was just deleted. + + Represents the data returned from the /v2/teams/{team_id} endpoint with http status code 200. + + v2 API Model: `DeletedTeam` + """ + + deleted_id: str + """The ID of the team that was deleted.""" + deleted_name: str + """The name of the team that was deleted.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "DeletedTeam" + + +class DeleteTeamRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request to delete a team by ID. + + Represents a DELETE request to the /v2/teams/{team_id} endpoint. 
+ """ + + team_id: str + """The ID of the team to delete.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.DELETE + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_teams_single + + @override + @classmethod + def get_default_success_response_type(cls) -> type[DeleteTeamResponse]: + return DeleteTeamResponse diff --git a/horde_sdk/ai_horde_api/apimodels/_users.py b/horde_sdk/ai_horde_api/apimodels/users.py similarity index 83% rename from horde_sdk/ai_horde_api/apimodels/_users.py rename to horde_sdk/ai_horde_api/apimodels/users.py index e785da86..cf2bf991 100644 --- a/horde_sdk/ai_horde_api/apimodels/_users.py +++ b/horde_sdk/ai_horde_api/apimodels/users.py @@ -1,19 +1,20 @@ from datetime import datetime +from typing import override from pydantic import Field -from typing_extensions import override -from horde_sdk.ai_horde_api.apimodels._styles import ResponseModelStylesUser from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest +from horde_sdk.ai_horde_api.apimodels.styles import ResponseModelStylesUser from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH from horde_sdk.ai_horde_api.fields import UUID_Identifier from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod from horde_sdk.generic_api.apimodels import ( APIKeyAllowedInRequestMixin, + ContainsMessageResponseMixin, HordeAPIObjectBaseModel, HordeResponseBaseModel, HordeResponseRootModel, - RequestSpecifiesUserIDMixin, + MessageSpecifiesUserIDMixin, ) from horde_sdk.generic_api.decoration import Unequatable, Unhashable @@ -21,7 +22,7 @@ class ContributionsDetails(HordeAPIObjectBaseModel): """How many images and megapixelsteps this user has generated. - v2 API Model: ContributionsDetails + v2 API Model: `ContributionsDetails` """ fulfillments: int | None = Field( @@ -42,7 +43,7 @@ def get_api_model_name(cls) -> str | None: class UserKudosDetails(HordeAPIObjectBaseModel): """The details of the kudos this user has accumulated, used, sent and received. - v2 API Model: UserKudosDetails + v2 API Model: `UserKudosDetails` """ accumulated: float | None = Field(0) @@ -78,7 +79,7 @@ def get_api_model_name(cls) -> str | None: class MonthlyKudos(HordeAPIObjectBaseModel): """The details of the monthly kudos this user receives. - v2 API Model: MonthlyKudos + v2 API Model: `MonthlyKudos` """ amount: int | None = Field(default=None) @@ -96,7 +97,7 @@ def get_api_model_name(cls) -> str | None: class UserThingRecords(HordeAPIObjectBaseModel): """How many images, texts, megapixelsteps and tokens this user has generated or requested. - v2 API Model: UserThingRecords + v2 API Model: `UserThingRecords` """ megapixelsteps: float | None = Field(0) @@ -114,7 +115,7 @@ def get_api_model_name(cls) -> str | None: class UserAmountRecords(HordeAPIObjectBaseModel): """How many images, texts, megapixelsteps and tokens this user has generated or requested. - v2 API Model: UserAmountRecords + v2 API Model: `UserAmountRecords` """ image: int | None = Field(0) @@ -135,7 +136,7 @@ def get_api_model_name(cls) -> str | None: class UserRecords(HordeAPIObjectBaseModel): """How many images, texts, megapixelsteps, tokens and styles this user has generated, requested or has had used. 
- v2 API Model: UserRecords + v2 API Model: `UserRecords` """ contribution: UserThingRecords | None = None @@ -158,7 +159,7 @@ def get_api_model_name(cls) -> str | None: class UsageDetails(HordeAPIObjectBaseModel): """How many images and megapixelsteps this user has requested. - v2 API Model: UsageDetails + v2 API Model: `UsageDetails` """ megapixelsteps: float | None = Field(default=None) @@ -178,7 +179,7 @@ def get_api_model_name(cls) -> str | None: class ActiveGenerations(HordeAPIObjectBaseModel): """A list of generations that are currently active for this user. - v2 API Model: ActiveGenerations + v2 API Model: `ActiveGenerations` """ """A list of generations that are currently active for this user.""" @@ -203,7 +204,14 @@ def get_api_model_name(cls) -> str | None: class UserDetailsResponse(HordeResponseBaseModel): """The details of a user. - v2 API Model: UserDetails + Note that the response will contain only information the requesting user has permission to see. + The owner of the account, moderators and admins will see more information than other users. + + Represents the data returned from the following endpoints and http status codes: + - /v2/users/{user_id} | SingleUserDetailsRequest [GET] -> 200 + - /v2/find_user | FindUserRequest [GET] -> 200 + + v2 API Model: `UserDetails` """ @override @@ -307,6 +315,12 @@ def get_api_model_name(cls) -> str | None: ) """(Privileged) This user has been given the Special role.""" + deleted: bool | None = Field( + default=None, + examples=[False], + ) + """If True, this user has been deleted.""" + suspicious: int | None = Field( default=None, examples=[0], @@ -366,7 +380,9 @@ def get_api_model_name(cls) -> str | None: class ListUsersDetailsResponse(HordeResponseRootModel[list[UserDetailsResponse]]): """The response for a list of user details. - v2 API Model: _ANONYMOUS_MODEL + Represents the data returned from the /v2/users endpoint with http status code 200. + + v2 API Model: `_ANONYMOUS_MODEL` """ root: list[UserDetailsResponse] @@ -379,7 +395,10 @@ def get_api_model_name(cls) -> str: class ListUsersDetailsRequest(BaseAIHordeRequest): - """Represents a request to list all users.""" + """Represents a request to list all users. + + Represents a GET request to the /v2/users endpoint. + """ page: int """The page number to request. 
There are up to 25 users per page.""" @@ -413,7 +432,9 @@ def get_query_fields(cls) -> list[str]: return ["page", "sort"] -class SingleUserDetailsRequest(BaseAIHordeRequest, RequestSpecifiesUserIDMixin): +class SingleUserDetailsRequest(BaseAIHordeRequest, MessageSpecifiesUserIDMixin): + """Represents a GET request to the /v2/users/{user_id} endpoint.""" + @override @classmethod def get_api_model_name(cls) -> None: @@ -537,21 +558,35 @@ class _ModifyUserBase(HordeAPIObjectBaseModel): class ModifyUser(_ModifyUserBase): + """Underlying model for modifying a user.""" + kudos: float | None = Field(default=None) """The amount of kudos to modify (can be negative).""" reset_suspicion: bool | None = Field(default=None) """Set the user's suspicion back to 0.""" + undelete: bool | None = Field(default=None) + """When set to true, a user who is marked for deletion will become active again.""" + class ModifyUserReply(_ModifyUserBase): + """Base class for the response returned when modifying a user.""" + new_kudos: float | None = Field(default=None) """The new amount of kudos this user has.""" new_suspicion: int | None = Field(default=None) """The new amount of suspicion this user has.""" + undeleted: bool | None = Field(default=None) + """True if the user was undeleted with this operation.""" class ModifyUserResponse(HordeResponseBaseModel, ModifyUserReply): + """Represents the data returned from the /v2/users/{user_id} endpoint with http status code 200. + + v2 API Model: `ModifyUser` + """ + @override @classmethod def get_api_model_name(cls) -> str: @@ -560,10 +595,15 @@ def get_api_model_name(cls) -> str: class ModifyUserRequest( BaseAIHordeRequest, - RequestSpecifiesUserIDMixin, + MessageSpecifiesUserIDMixin, ModifyUser, APIKeyAllowedInRequestMixin, ): + """Represents a PUT request to the /v2/users/{user_id} endpoint. + + v2 API Model: `ModifyUserInput` + """ + @override @classmethod def get_api_model_name(cls) -> str: @@ -588,3 +628,58 @@ def get_default_success_response_type(cls) -> type[ModifyUserResponse]: @classmethod def is_api_key_required(cls) -> bool: return True + + +class DeleteUserResponse( + HordeResponseBaseModel, + ContainsMessageResponseMixin, +): + """Confirmation that a user was deleted. + + Represents the data returned from the /v2/users/{user_id} endpoint with http status code 200. + + v2 API Model: `SimpleResponse` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "SimpleResponse" + + +class DeleteUserRequest( + BaseAIHordeRequest, + MessageSpecifiesUserIDMixin, + APIKeyAllowedInRequestMixin, +): + """Request to delete a user. + + Note that this is a privileged operation and requires the owning user, a moderator, or admin API key. + + Represents a DELETE request to the /v2/users/{user_id} endpoint.
+ """ + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.DELETE + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_users_single + + @override + @classmethod + def get_default_success_response_type(cls) -> type[DeleteUserResponse]: + return DeleteUserResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True diff --git a/horde_sdk/ai_horde_api/apimodels/workers/messages.py b/horde_sdk/ai_horde_api/apimodels/workers/messages.py new file mode 100644 index 00000000..f3f2e190 --- /dev/null +++ b/horde_sdk/ai_horde_api/apimodels/workers/messages.py @@ -0,0 +1,246 @@ +from typing import Literal, override + +from pydantic import Field + +from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest +from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH +from horde_sdk.consts import HTTPMethod +from horde_sdk.generic_api.apimodels import ( + APIKeyAllowedInRequestMixin, + ContainsMessageResponseMixin, + HordeAPIObjectBaseModel, + HordeResponseBaseModel, + HordeResponseRootModel, +) +from horde_sdk.generic_api.decoration import Unequatable, Unhashable + + +class _ResponseModelMessageData(HordeAPIObjectBaseModel): + """The data for a message, including the message, origin and expiry.""" + + worker_id: str | None = None + """The ID of the worker that the message is for.""" + message: str + """The message.""" + origin: str | None = None + """The origin of the message.""" + + +class ResponseModelMessage(HordeResponseBaseModel, _ResponseModelMessageData): + """A single message object. + + Represents the data returned from the following endpoints and http status codes: + - /v2/workers/messages/{message_id} | SingleWorkerMessageRequest [GET] -> 200 + - /v2/workers/messages | CreateWorkerMessageRequest [POST] -> 200 + + v2 API Model: `ResponseModelMessage` + """ + + expiry: int | None = None + """The number of hours after which this message expires.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "ResponseModelMessage" + + +@Unhashable +@Unequatable +class ResponseModelMessages(HordeResponseRootModel[list[ResponseModelMessage]]): + """A list of messages. + + Represents the data returned from the /v2/workers/messages endpoint with http status code 200. + + v2 API Model: `SimpleResponse` + """ + + root: list[ResponseModelMessage] + """The underlying list of messages.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "SimpleResponse" + + +class AllWorkerMessagesRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request paginated worker messages, optionally filtered by user/worker ID and validity. + + Represents a GET request to the /v2/workers/messages endpoint. + """ + + user_id: str | None = None + """The ID of the user to retrieve messages for. If not specified, all messages will be retrieved.""" + worker_id: str | None = None + """The ID of the worker to retrieve messages for. If not specified, all messages will be retrieved.""" + validity: Literal["active", "expired", "all"] = "active" + """The validity of the messages to retrieve.""" + page: int = 1 + """The page of messages to retrieve. 
Each page has 50 messages.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_query_fields(cls) -> list[str]: + return ["validity", "page"] + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_workers_messages + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ResponseModelMessages]: + return ResponseModelMessages + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class SingleWorkerMessageRequest( + BaseAIHordeRequest, +): + """Request a single worker message by ID. + + Represents a GET request to the /v2/workers/messages/{message_id} endpoint. + """ + + message_id: str = Field(alias="id") + """The ID of the message to retrieve.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.GET + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_workers_messages_single + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ResponseModelMessage]: + return ResponseModelMessage + + +class CreateWorkerMessageRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, + _ResponseModelMessageData, +): + """Request to create a new worker message. + + Note that you can only create messages for your own workers unless you are a moderator/admin. + + Represents a POST request to the /v2/workers/messages endpoint. + + v2 API Model: `ResponseModelMessage` + """ + + expiry: int + """The number of hours after which this message expires.""" + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "ResponseModelMessage" + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.POST + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_workers_messages + + @override + @classmethod + def get_default_success_response_type(cls) -> type[ResponseModelMessage]: + return ResponseModelMessage + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True + + +class DeleteWorkerMessageResponse( + HordeResponseBaseModel, + ContainsMessageResponseMixin, +): + """Confirmation that a worker message was deleted. + + Represents the data returned from the /v2/workers/messages/{message_id} endpoint with http status code 200. + + v2 API Model: `SimpleResponse` + """ + + @override + @classmethod + def get_api_model_name(cls) -> str: + return "SimpleResponse" + + +class DeleteWorkerMessageRequest( + BaseAIHordeRequest, + APIKeyAllowedInRequestMixin, +): + """Request to delete a worker message by ID. + + Note that this is a privileged operation and requires the API key that created the message or + admin/moderator privileges. + + Represents a DELETE request to the /v2/workers/messages/{message_id} endpoint. 
+ """ + + message_id: str = Field(alias="id") + """The ID of the message to delete.""" + + @override + @classmethod + def get_api_model_name(cls) -> None: + return None + + @override + @classmethod + def get_http_method(cls) -> HTTPMethod: + return HTTPMethod.DELETE + + @override + @classmethod + def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH: + return AI_HORDE_API_ENDPOINT_SUBPATH.v2_workers_messages_single + + @override + @classmethod + def get_default_success_response_type(cls) -> type[DeleteWorkerMessageResponse]: + return DeleteWorkerMessageResponse + + @override + @classmethod + def is_api_key_required(cls) -> bool: + return True diff --git a/horde_sdk/ai_horde_api/apimodels/workers/_workers.py b/horde_sdk/ai_horde_api/apimodels/workers/workers.py similarity index 83% rename from horde_sdk/ai_horde_api/apimodels/workers/_workers.py rename to horde_sdk/ai_horde_api/apimodels/workers/workers.py index 2fdee30f..e3ede847 100644 --- a/horde_sdk/ai_horde_api/apimodels/workers/_workers.py +++ b/horde_sdk/ai_horde_api/apimodels/workers/workers.py @@ -1,10 +1,11 @@ from collections.abc import Iterator +from typing import override from pydantic import AliasChoices, Field -from typing_extensions import override from horde_sdk.ai_horde_api.apimodels.base import BaseAIHordeRequest, WorkerRequestMixin, WorkerRequestNameMixin -from horde_sdk.ai_horde_api.consts import WORKER_TYPE +from horde_sdk.ai_horde_api.apimodels.workers.messages import ResponseModelMessage +from horde_sdk.ai_horde_api.consts import AI_HORDE_WORKER_TYPES from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH from horde_sdk.ai_horde_api.fields import TeamID, WorkerID from horde_sdk.consts import HTTPMethod @@ -18,6 +19,11 @@ class TeamDetailsLite(HordeAPIObjectBaseModel): + """The name and ID of a team. + + v2 API Model: `TeamDetailsLite` + """ + name: str | None = None """The Name given to this team.""" id_: str | TeamID | None = Field(default=None, alias="id") @@ -30,6 +36,11 @@ def get_api_model_name(cls) -> str | None: class WorkerKudosDetails(HordeAPIObjectBaseModel): + """The Kudos details of a worker. + + v2 API Model: `WorkerKudosDetails` + """ + generated: float | None = None """How much Kudos this worker has received for generating images.""" uptime: int | None = None @@ -41,9 +52,38 @@ def get_api_model_name(cls) -> str | None: return "WorkerKudosDetails" +class WorkerDetailLite(HordeAPIObjectBaseModel): + """The details of a worker, including its name and ID. + + v2 API Model: `WorkerDetailLite` + """ + + type_: AI_HORDE_WORKER_TYPES = Field(alias="type") + """The type of worker.""" + + name: str + """The Name given to this worker.""" + + id_: str | WorkerID = Field(alias="id") + """The UUID of this worker.""" + + online: bool | None = None + """True if the worker has checked-in the past 5 minutes.""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "WorkerDetailLite" + + @Unhashable class WorkerDetailItem(HordeAPIObjectBaseModel): - type_: WORKER_TYPE = Field(alias="type") + """The details of a worker, including its performance, uptime, permissions, and other details. 
+ + v2 API Model: `WorkerDetailItem` + """ + + type_: AI_HORDE_WORKER_TYPES = Field(alias="type") """The type of worker.""" name: str """The Name given to this worker.""" @@ -121,6 +161,9 @@ class WorkerDetailItem(HordeAPIObjectBaseModel): sdxl_controlnet: bool | None = Field(default=None, examples=[False]) """If True, this worker supports and allows sdxl controlnet requests.""" + messages: list[ResponseModelMessage] | None = None + """The messages that have been sent to this worker.""" + @override @classmethod def get_api_model_name(cls) -> str | None: @@ -170,6 +213,13 @@ def __eq__(self, other: object) -> bool: @Unhashable @Unequatable class AllWorkersDetailsResponse(HordeResponseRootModel[list[WorkerDetailItem]]): + """A list of worker details. + + Represents the data returned from the /v2/workers endpoint with http status code 200. + + v2 API Model: `WorkerDetails` + """ + # @tazlin: The typing of __iter__ in BaseModel seems to assume that RootModel wouldn't also be a parent class. # without a `type: ignore``, mypy feels that this is a bad override. This is probably a sub-optimal solution # on my part with me hoping to come up with a more elegant path in the future. @@ -197,9 +247,11 @@ class AllWorkersDetailsRequest(BaseAIHordeRequest, APIKeyAllowedInRequestMixin): """Returns information on all workers. If a moderator API key is specified, it will return additional information. + + Represents a GET request to the /v2/workers endpoint. """ - type_: WORKER_TYPE = Field(WORKER_TYPE.all, alias="type") + type_: AI_HORDE_WORKER_TYPES = Field(default=AI_HORDE_WORKER_TYPES.all, alias="type") """Filter workers by type. Default is 'all' which returns all workers.""" name: str | None = Field(default=None) """Returns a worker matching the exact name provided. Case insensitive.""" @@ -238,6 +290,15 @@ def is_api_key_required(cls) -> bool: @Unhashable @Unequatable class SingleWorkerDetailsResponse(HordeResponseBaseModel, WorkerDetailItem): + """The details of a single worker. + + Represents the data returned from the following endpoints and http status codes: + - /v2/workers/name/{worker_name} | SingleWorkerNameDetailsRequest [GET] -> 200 + - /v2/workers/{worker_id} | SingleWorkerDetailsRequest [GET] -> 200 + + v2 API Model: `WorkerDetails` + """ + @override @classmethod def get_api_model_name(cls) -> str | None: @@ -245,9 +306,9 @@ def get_api_model_name(cls) -> str | None: class SingleWorkerNameDetailsRequest(BaseAIHordeRequest, WorkerRequestNameMixin, APIKeyAllowedInRequestMixin): - """Returns information on a single worker. + """Returns information on a single worker by name. - If a moderator API key is specified, additional information is returned. + Represents a GET request to the /v2/workers/name/{worker_name} endpoint. """ @override @@ -277,9 +338,9 @@ def is_api_key_required(cls) -> bool: class SingleWorkerDetailsRequest(BaseAIHordeRequest, WorkerRequestMixin, APIKeyAllowedInRequestMixin): - """Returns information on a single worker. + """Returns information on a single worker by ID. - If a moderator API key is specified, additional information is returned. + Represents a GET request to the /v2/workers/{worker_id} endpoint. """ @override @@ -309,6 +370,13 @@ def is_api_key_required(cls) -> bool: class ModifyWorkerResponse(HordeResponseBaseModel): + """Information about a worker that has been created or modified. + + Represents the data returned from the /v2/workers/{worker_id} endpoint with http status code 200. 
+ + v2 API Model: `ModifyWorker` + """ + info: str | None = Field(default=None) """The new state of the 'info' var for this worker.""" maintenance: bool | None = Field(default=None) @@ -332,6 +400,15 @@ class ModifyWorkerRequest( APIKeyAllowedInRequestMixin, WorkerRequestMixin, ): + """Request to modify a worker. + + Note that this is a privileged endpoint and requires a moderator or admin API key. + + Represents a PUT request to the /v2/workers/{worker_id} endpoint. + + v2 API Model: `ModifyWorkerInput` + """ + info: str | None = Field(default=None, max_length=1000) """You can optionally provide a server note which will be seen in the server details. No profanity allowed!""" maintenance: bool | None = Field(default=None) @@ -368,6 +445,13 @@ def get_default_success_response_type(cls) -> type[ModifyWorkerResponse]: class DeleteWorkerResponse(HordeResponseBaseModel): + """The id and name of a worker that has been deleted. + + Represents the data returned from the /v2/workers/{worker_id} endpoint with http status code 200. + + v2 API Model: `DeletedWorker` + """ + deleted_id: str | None = None """The ID of the deleted worker.""" deleted_name: str | None = None @@ -384,6 +468,13 @@ class DeleteWorkerRequest( APIKeyAllowedInRequestMixin, WorkerRequestMixin, ): + """Request to delete a worker. + + Note that this is a privileged endpoint and requires a moderator or admin API key. + + Represents a DELETE request to the /v2/workers/{worker_id} endpoint. + """ + @override @classmethod def get_api_model_name(cls) -> str | None: diff --git a/horde_sdk/ai_horde_api/consts.py b/horde_sdk/ai_horde_api/consts.py index dc2c8868..b412a968 100644 --- a/horde_sdk/ai_horde_api/consts.py +++ b/horde_sdk/ai_horde_api/consts.py @@ -10,6 +10,23 @@ This is the amount of time that passes before the server will delete the request. """ +DEFAULT_HIRES_DENOISE_STRENGTH = 0.65 + + +class AI_HORDE_WORKER_TYPES(StrEnum): + """The worker types that are known to the API.""" + + all = "" + """All worker types.""" + image = auto() + """Image generation worker.""" + text = auto() + """Text generation worker.""" + interrogation = auto() + """Alchemy/Interrogation worker.""" + alchemist = "interrogation" + """Alchemy/Interrogation worker.""" + class GENERATION_STATE(StrEnum): """The generation states that are known to the API. @@ -38,199 +55,6 @@ class GENERATION_STATE(StrEnum): done = auto() -class WORKER_TYPE(StrEnum): - """The worker types that are known to the API. - - (alchemy, image, text, etc...) - """ - - all = "" - """All worker types.""" - image = auto() - """Image generation worker.""" - text = auto() - """Text generation worker.""" - interrogation = auto() - """Alchemy/Interrogation worker.""" - alchemist = "interrogation" - """Alchemy/Interrogation worker.""" - - -class ALCHEMY_FORMS(StrEnum): - """Forms (type of services) for alchemist type workers. - - (nsfw, caption, interrogation, post_process, etc...) - """ - - nsfw = auto() - caption = auto() - """Captioning (i.e., BLIP).""" - interrogation = auto() - """Interrogation (i.e., CLIP).""" - post_process = auto() - """Upscaling, facefixing, etc.""" - - -class KNOWN_SAMPLERS(StrEnum): - """The samplers that are known to the API. 
- - (k_lms, k_heun, DDIM, etc) - """ - - k_lms = auto() - k_heun = auto() - k_euler = auto() - k_euler_a = auto() - k_dpm_2 = auto() - k_dpm_2_a = auto() - k_dpm_fast = auto() - k_dpm_adaptive = auto() - k_dpmpp_2s_a = auto() - k_dpmpp_2m = auto() - dpmsolver = auto() - k_dpmpp_sde = auto() - lcm = auto() - DDIM = "DDIM" - - -class KNOWN_CONTROLNETS(StrEnum): - """The controlnets that are known to the API.""" - - canny = auto() - hed = auto() - depth = auto() - normal = auto() - openpose = auto() - seg = auto() - scribble = auto() - fakescribbles = auto() - hough = auto() - - -class KNOWN_WORKFLOWS(StrEnum): - """The controlnets that are known to the API.""" - - qr_code = auto() - - -class KNOWN_SOURCE_PROCESSING(StrEnum): - """The source processing methods that are known to the API. - - (txt2img, img2img, inpainting, etc) - """ - - txt2img = auto() - img2img = auto() - inpainting = auto() - outpainting = auto() - remix = auto() - """Stable Cascade Remix""" - - -class KNOWN_UPSCALERS(StrEnum): - """The upscalers that are known to the API. - - (RealESRGAN_x4plus, RealESRGAN_x2plus, RealESRGAN_x4plus_anime_6B, etc) - """ - - RealESRGAN_x4plus = auto() - RealESRGAN_x2plus = auto() - RealESRGAN_x4plus_anime_6B = auto() - NMKD_Siax = auto() - four_4x_AnimeSharp = "4x_AnimeSharp" - """AKA 4x_AnimeSharp""" - - -class KNOWN_FACEFIXERS(StrEnum): - """The facefixers that are known to the API. - - (CodeFormers, etc) - """ - - GFPGAN = auto() - CodeFormers = auto() - - -class KNOWN_MISC_POST_PROCESSORS(StrEnum): - """The misc post processors that are known to the API. - - (strip_background, etc) - """ - - strip_background = auto() - - -_all_valid_post_processors_names_and_values = ( - list(KNOWN_UPSCALERS.__members__.keys()) - + list(KNOWN_UPSCALERS.__members__.values()) - + list(KNOWN_FACEFIXERS.__members__.keys()) - + list(KNOWN_FACEFIXERS.__members__.values()) - + list(KNOWN_MISC_POST_PROCESSORS.__members__.keys()) - + list(KNOWN_MISC_POST_PROCESSORS.__members__.values()) -) -"""Used to validate post processor names and values. \ - This is because some post processor names are not valid python variable names.""" - - -class POST_PROCESSOR_ORDER_TYPE(StrEnum): - """The post processor order types that are known to the API. - - (facefixers_first, upscalers_first, custom, etc) - """ - - facefixers_first = auto() - """The facefixers are processed first.""" - upscalers_first = auto() - """The upscalers are processed first.""" - custom = auto() - """User specified post processor order.""" - - -DEFAULT_POST_PROCESSOR_ORDER = POST_PROCESSOR_ORDER_TYPE.facefixers_first -"""The default post processor order.""" - - -class KNOWN_CLIP_BLIP_TYPES(StrEnum): - """The CLIP and BLIP models that are known to the API.""" - - caption = auto() - """The caption (BLIP) model.""" - interrogation = auto() - """The interrogation (CLIP) model.""" - nsfw = auto() - """The NSFW model.""" - - -class KNOWN_INTERROGATORS(StrEnum): - """The interrogators that are known to the API.""" - - vit_l_14 = "ViT-L/14" - - -class KNOWN_ALCHEMY_TYPES(StrEnum): - """The alchemy processes (types) that are known to the API. 
- - (caption, GFPGAN, strip_background, etc) - """ - - _NONE = "" # FIXME - - caption = KNOWN_CLIP_BLIP_TYPES.caption - interrogation = KNOWN_CLIP_BLIP_TYPES.interrogation - nsfw = KNOWN_CLIP_BLIP_TYPES.nsfw - - RealESRGAN_x4plus = KNOWN_UPSCALERS.RealESRGAN_x4plus - RealESRGAN_x2plus = KNOWN_UPSCALERS.RealESRGAN_x2plus - RealESRGAN_x4plus_anime_6B = KNOWN_UPSCALERS.RealESRGAN_x4plus_anime_6B - NMKD_Siax = KNOWN_UPSCALERS.NMKD_Siax - fourx_AnimeSharp = KNOWN_UPSCALERS.four_4x_AnimeSharp - - GFPGAN = KNOWN_FACEFIXERS.GFPGAN - CodeFormers = KNOWN_FACEFIXERS.GFPGAN - - strip_background = KNOWN_MISC_POST_PROCESSORS.strip_background - - class METADATA_TYPE(StrEnum): """The generation metadata types that are known to the API. @@ -286,15 +110,6 @@ class MODEL_STATE(StrEnum): """Custom models.""" -class MODEL_TYPE(StrEnum): - """The model types that are known to the API.""" - - text = auto() - """Text generation models.""" - image = auto() - """Image generation models.""" - - class WarningCode(StrEnum): """The warning codes that are known to the API.""" @@ -520,3 +335,30 @@ class PROGRESS_STATE(StrEnum): waiting = auto() finished = auto() timed_out = auto() + + +class MODEL_TYPE(StrEnum): + """The model types that are known to the API.""" + + text = auto() + """Text generation models.""" + image = auto() + """Image generation models.""" + + +class POST_PROCESSOR_ORDER_TYPE(StrEnum): + """The post processor order types that are known to the API. + + (facefixers_first, upscalers_first, custom, etc) + """ + + facefixers_first = auto() + """The facefixers are processed first.""" + upscalers_first = auto() + """The upscalers are processed first.""" + custom = auto() + """User specified post processor order.""" + + +DEFAULT_POST_PROCESSOR_ORDER = POST_PROCESSOR_ORDER_TYPE.facefixers_first +"""The default post processor order.""" diff --git a/horde_sdk/ai_horde_api/endpoints.py b/horde_sdk/ai_horde_api/endpoints.py index ab42a903..1dc79caa 100644 --- a/horde_sdk/ai_horde_api/endpoints.py +++ b/horde_sdk/ai_horde_api/endpoints.py @@ -73,7 +73,7 @@ class AI_HORDE_API_ENDPOINT_SUBPATH(GENERIC_API_ENDPOINT_SUBPATH): v2_status_performance = "/v2/status/performance" v2_teams_all = "/v2/teams" - v2_teams = "/v2/teams/{team_id}" + v2_teams_single = "/v2/teams/{team_id}" v2_find_user = "/v2/find_user" """Note that this is an API key lookup, not a user ID lookup.""" @@ -97,17 +97,23 @@ class AI_HORDE_API_ENDPOINT_SUBPATH(GENERIC_API_ENDPOINT_SUBPATH): v2_documents_sponsors = "/v2/documents/sponsors" v2_documents_terms = "/v2/documents/terms" - v2_styles_image_by_name = "/v2/styles/image_by_name/{style_name}" - v2_styles_image_by_id = "/v2/styles/image/{style_id}" + v2_collections = "/v2/collections" v2_collections_by_name = "/v2/collection_by_name/{collection_name}" - v2_styles_image_example_by_id = "/v2/styles/image/{style_id}/example" - v2_styles_text_by_id = "/v2/styles/text/{style_id}" - v2_styles_image = "/v2/styles/image" v2_collections_by_id = "/v2/collections/{collection_id}" - v2_styles_image_example_by_id_example = "/v2/styles/image/{style_id}/example/{example_id}" + + v2_styles_image = "/v2/styles/image" + v2_styles_image_by_id = "/v2/styles/image/{style_id}" + v2_styles_image_by_name = "/v2/styles/image_by_name/{style_name}" + + v2_styles_image_example_by_style_id = "/v2/styles/image/{style_id}/example" + v2_styles_image_example_by_style_id_example_id = "/v2/styles/image/{style_id}/example/{example_id}" + v2_styles_text = "/v2/styles/text" + v2_styles_text_by_id = "/v2/styles/text/{style_id}" 
v2_styles_text_by_name = "/v2/styles/text_by_name/{style_name}" - v2_collections = "/v2/collections" + + v2_workers_messages = "/v2/workers/messages" + v2_workers_messages_single = "/v2/workers/messages/{message_id}" def get_ai_horde_swagger_url() -> str: diff --git a/horde_sdk/ai_horde_api/exceptions.py b/horde_sdk/ai_horde_api/exceptions.py index 24be6c44..16e5b7cc 100644 --- a/horde_sdk/ai_horde_api/exceptions.py +++ b/horde_sdk/ai_horde_api/exceptions.py @@ -1,5 +1,3 @@ -from typing import Any - from loguru import logger from horde_sdk.ai_horde_api.consts import GENERATION_MAX_LIFE, RC @@ -10,7 +8,12 @@ class AIHordeRequestError(HordeException): """Exception for when the AI Horde API returns an error response.""" - def __init__(self, error_response: RequestErrorResponse) -> None: # noqa: D107 + def __init__(self, error_response: RequestErrorResponse) -> None: + """Initialize an AI Horde request error. + + Args: + error_response (RequestErrorResponse): The error response returned by the AI Horde API. + """ logger.error(f"The AI Horde API returned an error response. Response: {error_response.message}") super().__init__(error_response.message) try: @@ -21,15 +24,6 @@ def __init__(self, error_response: RequestErrorResponse) -> None: # noqa: D107 ) -class AIHordePayloadValidationError(HordeException): - """Exception for when the AI Horde API cannot parse a request payload.""" - - def __init__(self, errors: dict[str, Any], message: str) -> None: - """Exception for when the AI Horde API cannot parse a request payload.""" - logger.error(f"The AI Horde API returned an error response. Response: {message}. Errors: {errors}") - super().__init__(message) - - class AIHordeImageValidationError(AIHordeRequestError): """Exception for when the AI Horde API cannot parse a source image for img2img.""" @@ -37,7 +31,12 @@ class AIHordeImageValidationError(AIHordeRequestError): class AIHordeGenerationTimedOutError(HordeException): """Exception for when the time limit for a generation request is reached.""" - def __init__(self, error_response: RequestErrorResponse) -> None: # noqa: D107 + def __init__(self, error_response: RequestErrorResponse) -> None: + """Initialize an AI Horde generation timed out error. + + Args: + error_response (RequestErrorResponse): The error response returned by the AI Horde API. + """ logger.error( f"The AI Horde API returned an error response. Response: {error_response.message}. " "This is likely because the generation timed out. " diff --git a/horde_sdk/ai_horde_api/fields.py b/horde_sdk/ai_horde_api/fields.py index 7ecaf0fe..242c7408 100644 --- a/horde_sdk/ai_horde_api/fields.py +++ b/horde_sdk/ai_horde_api/fields.py @@ -5,10 +5,10 @@ """ import uuid -from typing import Any, ClassVar +from typing import Any, ClassVar, override +from loguru import logger from pydantic import ConfigDict, RootModel, field_validator, model_serializer -from typing_extensions import override class UUID_Identifier(RootModel[uuid.UUID]): @@ -31,6 +31,7 @@ def id_must_be_uuid(cls, v: str | uuid.UUID) -> str | uuid.UUID: return v if v == "": # FIXME? 
This is a workaround for the swagger doc having `""` + logger.trace("Empty UUID string being converted to 0") return uuid.UUID(int=0) try: @@ -48,6 +49,12 @@ def __str__(self) -> str: @override def __eq__(self, other: Any) -> bool: + if other is None or ( + not (isinstance(self.__class__, uuid.UUID) or isinstance(other, uuid.UUID)) + and self.__class__ != other.__class__ + ): + logger.debug(f"Comparing {self.root.__class__} with {other.__class__}") + if isinstance(other, UUID_Identifier): return self.root == other.root @@ -112,7 +119,7 @@ def __ge__(self, other: object) -> bool: return NotImplemented -class JobID(UUID_Identifier): +class GenerationID(UUID_Identifier): """Represents the ID of a generation job. Instances of this class can be compared with a `str` or a UUID object.""" @@ -126,3 +133,7 @@ class ImageID(UUID_Identifier): class TeamID(UUID_Identifier): """Represents the ID of a team. Instances of this class can be compared with a `str` or a UUID object.""" + + +class SharedKeyID(UUID_Identifier): + """Represents the ID of a shared key. Instances of this class can be compared with a `str` or a UUID object.""" diff --git a/horde_sdk/ai_horde_api/metadata.py b/horde_sdk/ai_horde_api/metadata.py index df4905ce..77a76ae2 100644 --- a/horde_sdk/ai_horde_api/metadata.py +++ b/horde_sdk/ai_horde_api/metadata.py @@ -1,6 +1,7 @@ """Request metadata specific to the AI-Horde API.""" from enum import auto +from uuid import UUID from horde_sdk.generic_api.metadata import GenericPathFields, GenericQueryFields @@ -26,10 +27,40 @@ class AIHordePathData(GenericPathFields): """The name of a model.""" ipaddr = auto() """An IP address.""" + message_id = auto() + """The ID of a message.""" + style_id = auto() + """The ID of a style.""" + style_name = auto() + """The name of a style.""" + collection_id = auto() + """The ID of a collection.""" + example_id = auto() + """The ID of an example.""" + + +_dummy_uuid = str(UUID(int=0)) + +_default_path_values: dict[GenericPathFields, str] = { + AIHordePathData.id_: _dummy_uuid, + AIHordePathData.user_id: "1", + AIHordePathData.filter_id: _dummy_uuid, + AIHordePathData.team_id: _dummy_uuid, + AIHordePathData.worker_id: _dummy_uuid, + AIHordePathData.sharedkey_id: _dummy_uuid, + AIHordePathData.message_id: _dummy_uuid, + AIHordePathData.style_id: _dummy_uuid, + AIHordePathData.collection_id: _dummy_uuid, + AIHordePathData.example_id: _dummy_uuid, + AIHordePathData.ipaddr: "8.8.8.8", + AIHordePathData.model_name: "dummy model name", + AIHordePathData.worker_name: "dummy worker name", + AIHordePathData.style_name: "dummy style name", +} class AIHordeQueryData(GenericQueryFields): """AI Horde specific query data. 
See parent class for more information.""" - model_state = auto() + api_model_state = "model_state" """The level of official support by the API.""" diff --git a/horde_sdk/ai_horde_api/model_records.py b/horde_sdk/ai_horde_api/model_records.py new file mode 100644 index 00000000..bd60d21a --- /dev/null +++ b/horde_sdk/ai_horde_api/model_records.py @@ -0,0 +1,16 @@ +from typing import override + +from horde_model_reference.model_reference_records import GenericModelRecord + +from horde_sdk.generation_parameters.generic.object_models import ModelRecordResolver + + +class AIHordeModelRecordResolver(ModelRecordResolver): + """Resolver for AI Horde model records.""" + + @override + def resolve_model_by_name( + self, + model_name: str, + ) -> GenericModelRecord | None: + pass diff --git a/horde_sdk/ai_horde_worker/__init__.py b/horde_sdk/ai_horde_worker/__init__.py deleted file mode 100644 index ac53ec7c..00000000 --- a/horde_sdk/ai_horde_worker/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Helper methods for creating a worker for the AI Horde.""" diff --git a/horde_sdk/backend_parsing/__init__.py b/horde_sdk/backend_parsing/__init__.py new file mode 100644 index 00000000..11619e47 --- /dev/null +++ b/horde_sdk/backend_parsing/__init__.py @@ -0,0 +1 @@ +"""Model backend parsing object models and helpers.""" diff --git a/horde_sdk/backend_parsing/alchemy/__init__.py b/horde_sdk/backend_parsing/alchemy/__init__.py new file mode 100644 index 00000000..ee4c8353 --- /dev/null +++ b/horde_sdk/backend_parsing/alchemy/__init__.py @@ -0,0 +1 @@ +"""Alchemy (post-processing) backend parsing object models and helpers.""" diff --git a/horde_sdk/backend_parsing/consts.py b/horde_sdk/backend_parsing/consts.py new file mode 100644 index 00000000..e69de29b diff --git a/horde_sdk/backend_parsing/image/__init__.py b/horde_sdk/backend_parsing/image/__init__.py new file mode 100644 index 00000000..7eaca0df --- /dev/null +++ b/horde_sdk/backend_parsing/image/__init__.py @@ -0,0 +1 @@ +"""Image model backend parsing object models and helpers.""" diff --git a/horde_sdk/backend_parsing/image/comfyui/__init__.py b/horde_sdk/backend_parsing/image/comfyui/__init__.py new file mode 100644 index 00000000..a6b8b095 --- /dev/null +++ b/horde_sdk/backend_parsing/image/comfyui/__init__.py @@ -0,0 +1 @@ +"""ComfyUI image backend parsing object models and helpers.""" diff --git a/horde_sdk/backend_parsing/image/comfyui/hordelib.py b/horde_sdk/backend_parsing/image/comfyui/hordelib.py new file mode 100644 index 00000000..90169442 --- /dev/null +++ b/horde_sdk/backend_parsing/image/comfyui/hordelib.py @@ -0,0 +1,95 @@ +from enum import auto +from typing import ClassVar + +from strenum import StrEnum + +from horde_sdk.backend_parsing.object_models import ImageBackendValuesMapper +from horde_sdk.generation_parameters.image.consts import ( + KNOWN_IMAGE_CONTROLNETS, + KNOWN_IMAGE_SAMPLERS, + KNOWN_IMAGE_SCHEDULERS, +) + + +class KNOWN_COMFYUI_IMAGE_SAMPLERS(StrEnum): + """The samplers that are known to the API. 
+ + (k_lms, k_heun, DDIM, etc) + """ + + euler = auto() + euler_cfg_pp = auto() + euler_ancestral = auto() + euler_ancestral_cfg_pp = auto() + heun = auto() + heunpp2 = auto() + dpm_2 = auto() + dpm_2_ancestral = auto() + lms = auto() + dpm_fast = auto() + dpm_adaptive = auto() + dpmpp_2s_ancestral = auto() + dpmpp_2s_ancestral_cfg_pp = auto() + dpmpp_sde = auto() + dpmpp_sde_gpu = auto() + dpmpp_2m = auto() + dpmpp_2m_cfg_pp = auto() + dpmpp_2m_sde = auto() + dpmpp_2m_sde_gpu = auto() + dpmpp_3m_sde = auto() + dpmpp_3m_sde_gpu = auto() + ddpm = auto() + lcm = auto() + ipndm = auto() + ipndm_v = auto() + deis = auto() + res_multistep = auto() + res_multistep_cfg_pp = auto() + res_multistep_ancestral = auto() + res_multistep_ancestral_cfg_pp = auto() + gradient_estimation = auto() + + ddim = auto() + uni_pc = auto() + uni_pc_bh2 = auto() + + +KNOWN_COMFYUI_IMAGE_SCHEDULERS = KNOWN_IMAGE_SCHEDULERS +KNOWN_COMFYUI_CONTROLNETS = KNOWN_IMAGE_CONTROLNETS + + +class ComfyUIBackendValuesMapper( + ImageBackendValuesMapper[ + KNOWN_COMFYUI_IMAGE_SAMPLERS, + KNOWN_COMFYUI_IMAGE_SCHEDULERS, + KNOWN_COMFYUI_CONTROLNETS, + ], +): + """Mapper for ComfyUI backend values.""" + + _COMFYUI_SAMPLERS_CONVERT_MAP: ClassVar[dict[KNOWN_COMFYUI_IMAGE_SAMPLERS | str, KNOWN_IMAGE_SAMPLERS]] = { + KNOWN_COMFYUI_IMAGE_SAMPLERS.euler: KNOWN_IMAGE_SAMPLERS.k_euler, + KNOWN_COMFYUI_IMAGE_SAMPLERS.euler_ancestral: KNOWN_IMAGE_SAMPLERS.k_euler_a, + KNOWN_COMFYUI_IMAGE_SAMPLERS.heun: KNOWN_IMAGE_SAMPLERS.k_heun, + KNOWN_COMFYUI_IMAGE_SAMPLERS.dpm_2: KNOWN_IMAGE_SAMPLERS.k_dpm_2, + KNOWN_COMFYUI_IMAGE_SAMPLERS.dpm_2_ancestral: KNOWN_IMAGE_SAMPLERS.k_dpm_2_a, + KNOWN_COMFYUI_IMAGE_SAMPLERS.lms: KNOWN_IMAGE_SAMPLERS.k_lms, + KNOWN_COMFYUI_IMAGE_SAMPLERS.dpm_fast: KNOWN_IMAGE_SAMPLERS.k_dpm_fast, + KNOWN_COMFYUI_IMAGE_SAMPLERS.dpm_adaptive: KNOWN_IMAGE_SAMPLERS.k_dpm_adaptive, + KNOWN_COMFYUI_IMAGE_SAMPLERS.dpmpp_2s_ancestral: KNOWN_IMAGE_SAMPLERS.k_dpmpp_2s_a, + KNOWN_COMFYUI_IMAGE_SAMPLERS.dpmpp_sde: KNOWN_IMAGE_SAMPLERS.k_dpmpp_sde, + KNOWN_COMFYUI_IMAGE_SAMPLERS.dpmpp_2m: KNOWN_IMAGE_SAMPLERS.k_dpmpp_2m, + KNOWN_COMFYUI_IMAGE_SAMPLERS.ddim: KNOWN_IMAGE_SAMPLERS.DDIM, + KNOWN_COMFYUI_IMAGE_SAMPLERS.lcm: KNOWN_IMAGE_SAMPLERS.lcm, + } + + def __init__(self) -> None: + """Initialize the ComfyUI backend values mapper.""" + super().__init__( + backend_samplers_type=KNOWN_COMFYUI_IMAGE_SAMPLERS, + backend_schedulers_type=KNOWN_COMFYUI_IMAGE_SCHEDULERS, + backend_controlnets_type=KNOWN_COMFYUI_CONTROLNETS, + sdk_samplers_map=self._COMFYUI_SAMPLERS_CONVERT_MAP, + sdk_schedulers_map={}, + sdk_controlnets_map={}, + ) diff --git a/horde_sdk/backend_parsing/image/consts.py b/horde_sdk/backend_parsing/image/consts.py new file mode 100644 index 00000000..e69de29b diff --git a/horde_sdk/backend_parsing/multimodal/__init__.py b/horde_sdk/backend_parsing/multimodal/__init__.py new file mode 100644 index 00000000..d1f54dd8 --- /dev/null +++ b/horde_sdk/backend_parsing/multimodal/__init__.py @@ -0,0 +1 @@ +"""Multimodal model backend parsing object models and helpers.""" diff --git a/horde_sdk/backend_parsing/object_models.py b/horde_sdk/backend_parsing/object_models.py new file mode 100644 index 00000000..b4b6aa34 --- /dev/null +++ b/horde_sdk/backend_parsing/object_models.py @@ -0,0 +1,241 @@ +from abc import ABC + +from strenum import StrEnum +from typing_extensions import TypeVar + +from horde_sdk.consts import KNOWN_INFERENCE_BACKEND, WORKER_TYPE +from horde_sdk.generation_parameters.generic import CompositeParametersBase +from 
horde_sdk.generation_parameters.image.consts import ( + KNOWN_IMAGE_CONTROLNETS, + KNOWN_IMAGE_SAMPLERS, + KNOWN_IMAGE_SCHEDULERS, +) +from horde_sdk.generation_parameters.image.object_models import ImageGenerationParametersTemplate + +SDKParameterSetTypeVar = TypeVar("SDKParameterSetTypeVar", bound=CompositeParametersBase) + +BackendSamplersTypeVar = TypeVar("BackendSamplersTypeVar", bound=StrEnum) +BackendSchedulersTypeVar = TypeVar("BackendSchedulersTypeVar", bound=StrEnum) +BackendControlnetsTypeVar = TypeVar("BackendControlnetsTypeVar", bound=StrEnum) + + +MappingOutputTypeVar = TypeVar("MappingOutputTypeVar", bound=StrEnum) + + +class BackendValuesMapper[SDKParameterSetTypeVar: CompositeParametersBase](ABC): + """Base class for all backend values mappers. + + Value mappers provide a way to convert between the backend representations and the SDK representations of + certain values. For example, a backend may use a different name for a sampler than the SDK, even though they are + referring to the same thing. + """ + + _worker_type: WORKER_TYPE + _inference_backend: KNOWN_INFERENCE_BACKEND + + def _map_value( + self, + value: str, + mapping: dict[str, MappingOutputTypeVar], + known_input_type: type[StrEnum], + known_target_type: type[MappingOutputTypeVar], + ) -> MappingOutputTypeVar: + if len(mapping) == 0: + return known_target_type(value) + + if isinstance(value, known_input_type): + if value in mapping: + return mapping[value] + + return known_target_type[value.name] + + if isinstance(value, str): + if value in mapping: + return mapping[value] + if value in known_target_type.__members__: + return known_target_type[value] + if value in known_target_type.__members__.values(): + return known_target_type(value) + + raise TypeError( + f"Invalid type for value: {type(value)}, {value}", + ) + + def _is_valid_value( + self, + value: str, + known_type: type[StrEnum], + ) -> bool: + """Check if a value is valid for a given known type.""" + if isinstance(value, known_type): + return True + + if isinstance(value, str): + if value in known_type.__members__: + return True + if value in known_type.__members__.values(): + return True + + return False + + +class ImageBackendValuesMapper[ + BackendSamplersTypeVar: StrEnum, + BackendSchedulersTypeVar: StrEnum, + BackendControlnetsTypeVar: StrEnum, +]( + BackendValuesMapper[ImageGenerationParametersTemplate], +): + """Base class for all image backend values mappers. + + Image backends often refer to samplers and schedulers in different ways in addition to the SDK having its own + representation of these values. + + See :class:`BackendValuesMapper` for more information. 
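+
+    A minimal sketch using the ComfyUI mapper defined in
+    `horde_sdk.backend_parsing.image.comfyui.hordelib` (the sampler names shown assume the enum
+    member values spelled out in that module and in the SDK image consts):
+
+        mapper = ComfyUIBackendValuesMapper()
+        mapper.map_to_sdk_sampler("euler_ancestral")  # -> KNOWN_IMAGE_SAMPLERS.k_euler_a
+        mapper.map_to_backend_sampler("k_euler_a")  # -> KNOWN_COMFYUI_IMAGE_SAMPLERS.euler_ancestral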
+ """ + + _backend_samplers_type: type[BackendSamplersTypeVar] + _backend_schedulers_type: type[BackendSchedulersTypeVar] + _backend_controlnets_type: type[BackendControlnetsTypeVar] + + _to_sdk_sampler_map: dict[BackendSamplersTypeVar | str, KNOWN_IMAGE_SAMPLERS] + _to_sdk_scheduler_map: dict[BackendSchedulersTypeVar | str, KNOWN_IMAGE_SCHEDULERS] + _to_sdk_controlnet_map: dict[BackendControlnetsTypeVar | str, KNOWN_IMAGE_CONTROLNETS] + + _to_backend_sampler_map: dict[KNOWN_IMAGE_SAMPLERS | str, BackendSamplersTypeVar] + _to_backend_scheduler_map: dict[KNOWN_IMAGE_SCHEDULERS | str, BackendSchedulersTypeVar] + _to_backend_controlnet_map: dict[KNOWN_IMAGE_CONTROLNETS | str, BackendControlnetsTypeVar] + + def __init__( + self, + *, + backend_samplers_type: type[BackendSamplersTypeVar], + backend_schedulers_type: type[BackendSchedulersTypeVar], + backend_controlnets_type: type[BackendControlnetsTypeVar], + sdk_samplers_map: dict[BackendSamplersTypeVar | str, KNOWN_IMAGE_SAMPLERS], + sdk_schedulers_map: dict[BackendSchedulersTypeVar | str, KNOWN_IMAGE_SCHEDULERS], + sdk_controlnets_map: dict[BackendControlnetsTypeVar | str, KNOWN_IMAGE_CONTROLNETS], + ) -> None: + """Initialize the image backend values mapper. + + Args: + backend_samplers_type: The backend samplers type. + backend_schedulers_type: The backend schedulers type. + backend_controlnets_type: The backend controlnets type. + sdk_samplers_map: The SDK samplers mapping. + sdk_schedulers_map: The SDK schedulers mapping. + sdk_controlnets_map: The SDK controlnets mapping. + """ + self._backend_samplers_type = backend_samplers_type + self._backend_schedulers_type = backend_schedulers_type + self._backend_controlnets = backend_controlnets_type + + self._to_sdk_sampler_map = sdk_samplers_map + self._to_sdk_scheduler_map = sdk_schedulers_map + self._to_sdk_controlnet_map = sdk_controlnets_map + + self._to_backend_sampler_map = {v: backend_samplers_type(k) for k, v in sdk_samplers_map.items()} + self._to_backend_scheduler_map = {v: backend_schedulers_type(k) for k, v in sdk_schedulers_map.items()} + self._to_backend_controlnet_map = {v: backend_controlnets_type(k) for k, v in sdk_controlnets_map.items()} + + def map_to_sdk_sampler( + self, + backend_sampler: BackendSamplersTypeVar | str, + ) -> KNOWN_IMAGE_SAMPLERS: + """Map a backend sampler to a SDK sampler.""" + return self._map_value( + value=backend_sampler, + mapping=self._to_sdk_sampler_map, + known_input_type=self._backend_samplers_type, + known_target_type=KNOWN_IMAGE_SAMPLERS, + ) + + def map_to_backend_sampler( + self, + sdk_sampler: KNOWN_IMAGE_SAMPLERS | str, + ) -> BackendSamplersTypeVar | str: + """Map a SDK sampler to a backend sampler.""" + return self._map_value( + value=sdk_sampler, + mapping=self._to_backend_sampler_map, + known_input_type=KNOWN_IMAGE_SAMPLERS, + known_target_type=self._backend_samplers_type, + ) + + def is_valid_backend_sampler( + self, + backend_sampler: BackendSamplersTypeVar | str, + ) -> bool: + """Check if a backend sampler is valid.""" + return self._is_valid_value( + value=backend_sampler, + known_type=self._backend_samplers_type, + ) + + def map_to_sdk_scheduler( + self, + backend_scheduler: BackendSchedulersTypeVar | str, + ) -> KNOWN_IMAGE_SCHEDULERS: + """Map a backend scheduler to a SDK scheduler.""" + return self._map_value( + value=backend_scheduler, + mapping=self._to_sdk_scheduler_map, + known_input_type=self._backend_schedulers_type, + known_target_type=KNOWN_IMAGE_SCHEDULERS, + ) + + def map_to_backend_scheduler( + self, + sdk_scheduler: 
KNOWN_IMAGE_SCHEDULERS | str, + ) -> BackendSchedulersTypeVar | str: + """Map a SDK scheduler to a backend scheduler.""" + return self._map_value( + value=sdk_scheduler, + mapping=self._to_backend_scheduler_map, + known_input_type=KNOWN_IMAGE_SCHEDULERS, + known_target_type=self._backend_schedulers_type, + ) + + def is_valid_backend_scheduler( + self, + backend_scheduler: BackendSchedulersTypeVar | str, + ) -> bool: + """Check if a backend scheduler is valid.""" + return self._is_valid_value( + value=backend_scheduler, + known_type=self._backend_schedulers_type, + ) + + def map_to_sdk_controlnet( + self, + backend_controlnet: BackendControlnetsTypeVar | str, + ) -> KNOWN_IMAGE_CONTROLNETS: + """Map a backend controlnet to a SDK controlnet.""" + return self._map_value( + value=backend_controlnet, + mapping=self._to_sdk_controlnet_map, + known_input_type=self._backend_controlnets, + known_target_type=KNOWN_IMAGE_CONTROLNETS, + ) + + def map_to_backend_controlnet( + self, + sdk_controlnet: KNOWN_IMAGE_CONTROLNETS | str, + ) -> BackendControlnetsTypeVar | str: + """Map a SDK controlnet to a backend controlnet.""" + return self._map_value( + value=sdk_controlnet, + mapping=self._to_backend_controlnet_map, + known_input_type=KNOWN_IMAGE_CONTROLNETS, + known_target_type=self._backend_controlnets, + ) + + def is_valid_backend_controlnet( + self, + backend_controlnet: BackendControlnetsTypeVar | str, + ) -> bool: + """Check if a backend controlnet is valid.""" + return self._is_valid_value( + value=backend_controlnet, + known_type=self._backend_controlnets, + ) diff --git a/horde_sdk/backend_parsing/text/__init__.py b/horde_sdk/backend_parsing/text/__init__.py new file mode 100644 index 00000000..9e47b7f6 --- /dev/null +++ b/horde_sdk/backend_parsing/text/__init__.py @@ -0,0 +1 @@ +"""Text backend parsing object models and helpers.""" diff --git a/horde_sdk/backend_parsing/video/__init__.py b/horde_sdk/backend_parsing/video/__init__.py new file mode 100644 index 00000000..e3685bb9 --- /dev/null +++ b/horde_sdk/backend_parsing/video/__init__.py @@ -0,0 +1 @@ +"""Video backend parsing object models and helpers.""" diff --git a/horde_sdk/consts.py b/horde_sdk/consts.py index 2b98de25..c7b4816e 100644 --- a/horde_sdk/consts.py +++ b/horde_sdk/consts.py @@ -1,12 +1,47 @@ """Constants used by the SDK.""" -from enum import IntEnum +import os +from enum import IntEnum, auto +from uuid import UUID +from pydantic import BaseModel, ConfigDict, Field from strenum import StrEnum _ANONYMOUS_MODEL = "_ANONYMOUS_MODEL" """This model is on the API but does not have a name.""" +_OVERLOADED_MODEL = "_MODEL_OVERLOADED" +"""The model is used incorrectly on the API.""" + +ID_TYPES = str | UUID +"""The types that can be used as IDs.""" + +horde_sdk_github_url = "https://github.com/Haidra-Org/horde_sdk" + + +def get_default_frozen_model_config_dict() -> ConfigDict: + """Return the default horde-sdk frozen model config dict for a pydantic `BaseModel`. + + Critically, models configured this way will behave differently when used in tests, preventing + the use of extra fields being passed to constructors. However, this is not the case in production, + where pass-through is allowed and up to implementors to choose to handle. 
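+
+    A minimal sketch of opting a model into this configuration (the model and field names are
+    illustrative):
+
+        class ExampleFrozenModel(BaseModel):
+            model_config = get_default_frozen_model_config_dict()
+
+            name: str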
+ """ + return ( + ConfigDict( + frozen=True, + use_attribute_docstrings=True, + from_attributes=True, + extra="allow", + ) + if not os.getenv("TESTS_ONGOING") + else ConfigDict( + frozen=True, + use_attribute_docstrings=True, + from_attributes=True, + extra="forbid", + ) + ) + class HTTPMethod(StrEnum): """An enum representing all HTTP methods.""" @@ -73,3 +108,194 @@ def is_error_status_code(status_code: HTTPStatusCode | int) -> bool: if isinstance(status_code, HTTPStatusCode): status_code = status_code.value return 400 <= status_code < 600 + + +class KNOWN_DISPATCH_SOURCE(StrEnum): + """The known sources of a dispatch.""" + + UNKNOWN = auto() + """The source of the dispatch is unknown.""" + + LOCAL_CUSTOM_3RD_PARTY = auto() + """The source of the dispatch is a local custom 3rd party API.""" + + AI_HORDE_API_OFFICIAL = auto() + """The source of the dispatch is the official AI Horde API.""" + + AI_HORDE_API_FORK = auto() + """The source of the dispatch is a fork of the official AI Horde API.""" + + +class KNOWN_NSFW_DETECTOR(StrEnum): + """The NSFW detectors that are known to the API.""" + + BACKEND_DEFAULT = auto() + """The default model for the worker backend.""" + + HORDE_SAFETY = auto() + """The AI-Horde horde_safety package.""" + + COMPVIS_SAFETY_CHECKER = auto() + """The compvis safety checker model released with stable diffusion.""" + + +class WORKER_TYPE(StrEnum): + """The worker types that are known. + + (alchemy, image, text, etc...) + """ + + image = auto() + """Image generation worker.""" + text = auto() + """Text generation worker.""" + interrogation = auto() + """Alchemy/Interrogation worker.""" + alchemist = "interrogation" + """Alchemy/Interrogation worker.""" + video = auto() + """Video generation worker.""" + audio = auto() + """Audio generation worker.""" + + +class KNOWN_INFERENCE_BACKEND(StrEnum): + """The known generative inference backends.""" + + UNKNOWN = auto() + """The inference backend is unknown.""" + + IN_MODEL_NAME = auto() + """The model name is prepended with the backend name.""" + + CUSTOM_UNPUBLISHED = auto() + """The inference backend is a custom, unpublished backend.""" + + COMFYUI = auto() + """The inference backend is ComfyUI.""" + + A1111 = auto() + """The inference backend is A1111.""" + + HORDE_ALCHEMIST = auto() + """The inference backend is the Horde Alchemist.""" + + KOBOLD_CPP = auto() + """The inference backend is Kobold CPP.""" + + APHRODITE = auto() + """The inference backend is Aphrodite.""" + + +class KNOWN_ALCHEMY_BACKEND(StrEnum): + """The known alchemy backends.""" + + UNKNOWN = auto() + """The alchemy backend is unknown.""" + + CUSTOM_UNPUBLISHED = auto() + """The alchemy backend is a custom, unpublished backend.""" + + HORDE_ALCHEMIST = auto() + """The alchemy backend is the Horde Alchemist.""" + + +class KNOWN_SERVICE(StrEnum): + """The known services that can be used for generation.""" + + UNKNOWN = auto() + """The service is unknown.""" + + CUSTOM_UNPUBLISHED = auto() + """The service is a custom, unpublished service.""" + + AI_HORDE = auto() + """The AI Horde service.""" + + CIVITAI = auto() + """The CivitAI service.""" + + +class ServiceInfo(BaseModel): + """Represents information about a service.""" + + model_config = ConfigDict( + use_attribute_docstrings=True, + ) + + known_identifier: KNOWN_SERVICE = KNOWN_SERVICE.UNKNOWN + """The known identifier for the service, if any.""" + + display_name: str = Field(..., min_length=1) + """The name of the service.""" + + description: str | None = None + """A description of the 
service.""" + + url: str | None = None + """The URL of the service.""" + + +UnknownServiceInfo = ServiceInfo( + known_identifier=KNOWN_SERVICE.UNKNOWN, + display_name="Unknown Service", + description="This service is not recognized.", + url=None, +) + +CustomUnpublishedServiceInfo = ServiceInfo( + known_identifier=KNOWN_SERVICE.CUSTOM_UNPUBLISHED, + display_name="Custom Unpublished Service", + description="This is a custom unpublished service.", + url=None, +) + +AIHordeServiceInfo = ServiceInfo( + known_identifier=KNOWN_SERVICE.AI_HORDE, + display_name="AI Horde", + description="AI Horde is a decentralized, crowd-sourced platform for AI model generation and more.", + url="https://aihorde.net", +) + +CivitAIServiceInfo = ServiceInfo( + known_identifier=KNOWN_SERVICE.CIVITAI, + display_name="CivitAI", + description="CivitAI is a platform for AI model hosting and deployment.", + url="https://civitai.com", +) + +_known_service_infos = { + KNOWN_SERVICE.UNKNOWN: UnknownServiceInfo, + KNOWN_SERVICE.CUSTOM_UNPUBLISHED: CustomUnpublishedServiceInfo, + KNOWN_SERVICE.AI_HORDE: AIHordeServiceInfo, + KNOWN_SERVICE.CIVITAI: CivitAIServiceInfo, +} + + +def register_known_service_info(service_info: ServiceInfo) -> None: + """Register a known service info. + + Args: + service_info: The service info to register. + + Raises: + ValueError: If the service info is already registered. + """ + _known_service_infos[service_info.known_identifier] = service_info + + +def get_known_service_info(service: KNOWN_SERVICE) -> ServiceInfo: + """Get the known service info for a given service. + + Args: + service: The known service. + + Returns: + ServiceInfo: The service info for the given service. + """ + try: + service = KNOWN_SERVICE(service) + except ValueError as e: + raise ValueError("Invalid service type") from e + + return _known_service_infos.get(service, UnknownServiceInfo) diff --git a/horde_sdk/deploy_config/__init__.py b/horde_sdk/deploy_config/__init__.py new file mode 100644 index 00000000..1212fe5f --- /dev/null +++ b/horde_sdk/deploy_config/__init__.py @@ -0,0 +1 @@ +"""Deployment configuration package.""" diff --git a/horde_sdk/deploy_config/workers/__init__.py b/horde_sdk/deploy_config/workers/__init__.py new file mode 100644 index 00000000..62f40305 --- /dev/null +++ b/horde_sdk/deploy_config/workers/__init__.py @@ -0,0 +1 @@ +"""Worker deployment configuration package.""" diff --git a/horde_sdk/exceptions.py b/horde_sdk/exceptions.py index 1a40effb..16ac6bf4 100644 --- a/horde_sdk/exceptions.py +++ b/horde_sdk/exceptions.py @@ -1,2 +1,16 @@ +from typing import Any + +from loguru import logger + + class HordeException(Exception): """Base exception for all Horde SDK exceptions.""" + + +class PayloadValidationError(HordeException): + """Exception for when the API cannot parse a request payload.""" + + def __init__(self, errors: dict[str, Any], message: str) -> None: + """Exception for when the AI Horde API cannot parse a request payload.""" + logger.error(f"The AI Horde API returned an error response. Response: {message}. Errors: {errors}") + super().__init__(message) diff --git a/horde_sdk/generation_parameters/__init__.py b/horde_sdk/generation_parameters/__init__.py new file mode 100644 index 00000000..c611d19b --- /dev/null +++ b/horde_sdk/generation_parameters/__init__.py @@ -0,0 +1,96 @@ +"""Contains class definitions and handling for object models of generation parameters. 
+ +For example, the parameters for text generation (inference) are defined in +:class:`horde_sdk.generation_parameters.text.object_models.TextGenerationParameters` +which contains fields such as `prompt`, `max_length`, `temperature`, etc. + +""" + +from horde_sdk.consts import KNOWN_NSFW_DETECTOR +from horde_sdk.generation_parameters.alchemy import AlchemyParameters, SingleAlchemyParameters +from horde_sdk.generation_parameters.alchemy.consts import ( + KNOWN_ALCHEMY_FORMS, + KNOWN_ALCHEMY_TYPES, + KNOWN_CAPTION_MODELS, + KNOWN_CLIP_BLIP_TYPES, + KNOWN_FACEFIXERS, + KNOWN_INTERROGATORS, + KNOWN_MISC_POST_PROCESSORS, + KNOWN_UPSCALERS, + is_caption_form, + is_facefixer_form, + is_interrogator_form, + is_nsfw_detector_form, + is_strip_background_form, + is_upscaler_form, +) +from horde_sdk.generation_parameters.generic import ( + CompositeParametersBase, + GenerationParameterBaseModel, + GenerationParameterList, + GenerationWithModelParameters, +) +from horde_sdk.generation_parameters.generic.consts import KNOWN_AUX_MODEL_SOURCE +from horde_sdk.generation_parameters.image.consts import ( + CLIP_SKIP_REPRESENTATION, + KNOWN_IMAGE_CONTROLNETS, + KNOWN_IMAGE_SAMPLERS, + KNOWN_IMAGE_SCHEDULERS, + KNOWN_IMAGE_SOURCE_PROCESSING, + KNOWN_IMAGE_WORKFLOWS, + LORA_TRIGGER_INJECT_CHOICE, + TI_TRIGGER_INJECT_CHOICE, +) +from horde_sdk.generation_parameters.image.object_models import ( + BasicImageGenerationParameters, + BasicImageGenerationParametersTemplate, + ImageGenerationParameters, + ImageGenerationParametersTemplate, +) +from horde_sdk.generation_parameters.text.object_models import ( + BasicTextGenerationFormatParameters, + BasicTextGenerationParameters, + KoboldAITextGenerationParameters, + TextGenerationParameters, +) + +__all__ = [ + "CLIP_SKIP_REPRESENTATION", + "KNOWN_ALCHEMY_FORMS", + "KNOWN_ALCHEMY_TYPES", + "KNOWN_AUX_MODEL_SOURCE", + "KNOWN_CAPTION_MODELS", + "KNOWN_CLIP_BLIP_TYPES", + "KNOWN_FACEFIXERS", + "KNOWN_IMAGE_CONTROLNETS", + "KNOWN_IMAGE_SAMPLERS", + "KNOWN_IMAGE_SCHEDULERS", + "KNOWN_IMAGE_SOURCE_PROCESSING", + "KNOWN_IMAGE_WORKFLOWS", + "KNOWN_INTERROGATORS", + "KNOWN_MISC_POST_PROCESSORS", + "KNOWN_NSFW_DETECTOR", + "KNOWN_UPSCALERS", + "LORA_TRIGGER_INJECT_CHOICE", + "TI_TRIGGER_INJECT_CHOICE", + "AlchemyParameters", + "BasicImageGenerationParameters", + "BasicImageGenerationParametersTemplate", + "BasicTextGenerationFormatParameters", + "BasicTextGenerationParameters", + "CompositeParametersBase", + "GenerationParameterBaseModel", + "GenerationParameterList", + "GenerationWithModelParameters", + "ImageGenerationParameters", + "ImageGenerationParametersTemplate", + "KoboldAITextGenerationParameters", + "SingleAlchemyParameters", + "TextGenerationParameters", + "is_caption_form", + "is_facefixer_form", + "is_interrogator_form", + "is_nsfw_detector_form", + "is_strip_background_form", + "is_upscaler_form", +] diff --git a/horde_sdk/generation_parameters/alchemy/__init__.py b/horde_sdk/generation_parameters/alchemy/__init__.py new file mode 100644 index 00000000..c1a8a844 --- /dev/null +++ b/horde_sdk/generation_parameters/alchemy/__init__.py @@ -0,0 +1,35 @@ +"""Contains the Alchemy parameters models and related classes. + +See :class:`horde_sdk.generation_parameters.alchemy.object_models.AlchemyParameters` for the main +Alchemy parameters model. 
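+
+A minimal construction sketch (the result ID and source image values are placeholders, and any
+fields inherited from the shared parameter base classes are assumed to have defaults):
+
+    from horde_sdk.generation_parameters.alchemy import AlchemyParameters, UpscaleAlchemyParameters
+    from horde_sdk.generation_parameters.alchemy.consts import KNOWN_UPSCALERS
+
+    alchemy_parameters = AlchemyParameters(
+        upscalers=[
+            UpscaleAlchemyParameters(
+                result_id="example-result-id",
+                source_image="<base64 encoded image>",
+                upscaler=KNOWN_UPSCALERS.RealESRGAN_x4plus,
+            ),
+        ],
+    )
+    # alchemy_parameters.all_alchemy_operations would then contain the single upscale operation.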
+""" + +from horde_sdk.generation_parameters.alchemy.object_models import ( + AlchemyParameters, + CaptionAlchemyParameters, + FacefixAlchemyParameters, + InterrogateAlchemyParameters, + NSFWAlchemyParameters, + ResolverRule, + SingleAlchemyParameters, + UpscaleAlchemyParameters, + instantiate_alchemy_parameters, + register_alchemy_parameter_rule, + resolve_alchemy_parameter_model, + unregister_alchemy_parameter_rule, +) + +__all__ = [ + "AlchemyParameters", + "CaptionAlchemyParameters", + "FacefixAlchemyParameters", + "InterrogateAlchemyParameters", + "NSFWAlchemyParameters", + "ResolverRule", + "SingleAlchemyParameters", + "UpscaleAlchemyParameters", + "instantiate_alchemy_parameters", + "register_alchemy_parameter_rule", + "resolve_alchemy_parameter_model", + "unregister_alchemy_parameter_rule", +] diff --git a/horde_sdk/generation_parameters/alchemy/consts.py b/horde_sdk/generation_parameters/alchemy/consts.py new file mode 100644 index 00000000..a01273a2 --- /dev/null +++ b/horde_sdk/generation_parameters/alchemy/consts.py @@ -0,0 +1,176 @@ +from enum import auto + +from strenum import StrEnum + + +class ALCHEMY_PARAMETER_FIELDS(StrEnum): + """Field names that commonly appear in alchemy parameter payloads.""" + + result_id = auto() + form = auto() + source_image = auto() + upscaler = auto() + facefixer = auto() + interrogator = auto() + caption_model = auto() + nsfw_detector = auto() + + +class KNOWN_ALCHEMY_FORMS(StrEnum): + """Forms (type of services) for alchemist type workers. + + (nsfw, caption, interrogation, post_process, etc...) + """ + + nsfw = auto() + """NSFW detection.""" + caption = auto() + """Captioning (i.e., BLIP).""" + interrogation = auto() + """Interrogation (i.e., CLIP).""" + post_process = auto() + """Upscaling, facefixing, etc.""" + + +class KNOWN_UPSCALERS(StrEnum): + """The upscalers that are known to the API. + + (RealESRGAN_x4plus, RealESRGAN_x2plus, RealESRGAN_x4plus_anime_6B, etc) + """ + + BACKEND_DEFAULT = auto() + """The default model for the worker backend.""" + + RealESRGAN_x4plus = auto() + RealESRGAN_x2plus = auto() + RealESRGAN_x4plus_anime_6B = auto() + NMKD_Siax = auto() + four_4x_AnimeSharp = "4x_AnimeSharp" + """AKA 4x_AnimeSharp""" + + +class KNOWN_FACEFIXERS(StrEnum): + """The facefixers that are known to the API. + + (CodeFormers, etc) + """ + + BACKEND_DEFAULT = auto() + """The default model for the worker backend.""" + + GFPGAN = auto() + CodeFormers = auto() + + +class KNOWN_MISC_POST_PROCESSORS(StrEnum): + """The misc post processors that are known to the API. + + (strip_background, etc) + """ + + strip_background = auto() + + +_all_valid_post_processors_names_and_values = ( + list(KNOWN_UPSCALERS.__members__.keys()) + + list(KNOWN_UPSCALERS.__members__.values()) + + list(KNOWN_FACEFIXERS.__members__.keys()) + + list(KNOWN_FACEFIXERS.__members__.values()) + + list(KNOWN_MISC_POST_PROCESSORS.__members__.keys()) + + list(KNOWN_MISC_POST_PROCESSORS.__members__.values()) +) +"""Used to validate post processor names and values. 
\ + This is because some post processor names are not valid python variable names.""" + + +class KNOWN_CLIP_BLIP_TYPES(StrEnum): + """The CLIP and BLIP models that are known to the API.""" + + caption = auto() + """The caption (BLIP) model.""" + interrogation = auto() + """The interrogation (CLIP) model.""" + nsfw = auto() + """The NSFW model.""" + + +class KNOWN_INTERROGATORS(StrEnum): + """The interrogators that are known to the API.""" + + BACKEND_DEFAULT = auto() + """The default model for the worker backend.""" + + vit_l_14 = "sentence-transformers/clip-ViT-L-14" + vit_big_g_14_laion2b_39b_b160k = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" + + +class KNOWN_CAPTION_MODELS(StrEnum): + """The caption models that are known to the API.""" + + BACKEND_DEFAULT = auto() + """The default model for the worker backend.""" + + BLIP_BASE_SALESFORCE = "Salesforce/blip-image-captioning-base" + BLIP_LARGE_SALESFORCE = "Salesforce/blip-image-captioning-large" + + +class KNOWN_ALCHEMY_TYPES(StrEnum): + """The alchemy processes (types) that are known to the API. + + (caption, GFPGAN, strip_background, etc) + """ + + _NONE = "" # FIXME + + caption = KNOWN_CLIP_BLIP_TYPES.caption + interrogation = KNOWN_CLIP_BLIP_TYPES.interrogation + nsfw = KNOWN_CLIP_BLIP_TYPES.nsfw + + RealESRGAN_x4plus = KNOWN_UPSCALERS.RealESRGAN_x4plus + RealESRGAN_x2plus = KNOWN_UPSCALERS.RealESRGAN_x2plus + RealESRGAN_x4plus_anime_6B = KNOWN_UPSCALERS.RealESRGAN_x4plus_anime_6B + NMKD_Siax = KNOWN_UPSCALERS.NMKD_Siax + fourx_AnimeSharp = KNOWN_UPSCALERS.four_4x_AnimeSharp + + GFPGAN = KNOWN_FACEFIXERS.GFPGAN + CodeFormers = KNOWN_FACEFIXERS.GFPGAN + + strip_background = KNOWN_MISC_POST_PROCESSORS.strip_background + + +def is_upscaler_form(form: KNOWN_ALCHEMY_TYPES | str) -> bool: + """Check if the form is an upscaler form.""" + value = form + if isinstance(form, KNOWN_ALCHEMY_TYPES): + value = form.value + + return value in KNOWN_UPSCALERS.__members__ or value in KNOWN_UPSCALERS.__members__.values() + + +def is_facefixer_form(form: KNOWN_ALCHEMY_TYPES | str) -> bool: + """Check if the form is a facefixer form.""" + value = form + if isinstance(form, KNOWN_ALCHEMY_TYPES): + value = form.value + + return value in KNOWN_FACEFIXERS.__members__ or value in KNOWN_FACEFIXERS.__members__.values() + + +def is_interrogator_form(form: KNOWN_ALCHEMY_TYPES | str) -> bool: + """Check if the form is an interrogator form.""" + return form == KNOWN_CLIP_BLIP_TYPES.interrogation + + +def is_caption_form(form: KNOWN_ALCHEMY_TYPES | str) -> bool: + """Check if the form is a caption form.""" + return form == KNOWN_CLIP_BLIP_TYPES.caption + + +def is_nsfw_detector_form(form: KNOWN_ALCHEMY_TYPES | str) -> bool: + """Check if the form is an NSFW form.""" + return form == KNOWN_CLIP_BLIP_TYPES.nsfw + + +def is_strip_background_form(form: KNOWN_ALCHEMY_TYPES | str) -> bool: + """Check if the form is a strip background form.""" + return form == KNOWN_MISC_POST_PROCESSORS.strip_background diff --git a/horde_sdk/generation_parameters/alchemy/object_models.py b/horde_sdk/generation_parameters/alchemy/object_models.py new file mode 100644 index 00000000..1b30842a --- /dev/null +++ b/horde_sdk/generation_parameters/alchemy/object_models.py @@ -0,0 +1,309 @@ +from __future__ import annotations + +from collections.abc import Callable, Mapping +from dataclasses import dataclass +from typing import Literal, override + +from pydantic import Field + +from horde_sdk.consts import ID_TYPES, KNOWN_NSFW_DETECTOR +from horde_sdk.generation_parameters.alchemy.consts import 
( + ALCHEMY_PARAMETER_FIELDS, + KNOWN_ALCHEMY_FORMS, + KNOWN_ALCHEMY_TYPES, + KNOWN_CAPTION_MODELS, + KNOWN_FACEFIXERS, + KNOWN_INTERROGATORS, + KNOWN_UPSCALERS, +) +from horde_sdk.generation_parameters.generic import CompositeParametersBase +from horde_sdk.generation_parameters.generic.object_models import GenerationFeatureFlags +from horde_sdk.generation_parameters.utils import ( + ResultIdAllocator, + finalize_template_for_parameters, + resolve_result_id_from_payload, +) + + +class AlchemyFeatureFlags(GenerationFeatureFlags): + """Feature flags for an alchemy worker.""" + + alchemy_types: list[KNOWN_ALCHEMY_TYPES] = Field(default_factory=list) + """The alchemy types supported by the worker.""" + + +class SingleAlchemyParametersTemplate(CompositeParametersBase): + """Template for alchemy parameters with all fields optional. + + Use this class during chain construction when not all parameters are known yet. + Convert to SingleAlchemyParameters (or subclasses) before execution. + """ + + result_id: str | None = None + """The generation ID to use for the generation.""" + + form: KNOWN_ALCHEMY_FORMS | str | None = None + """The form to use for the generation.""" + + source_image: bytes | str | None = None + """The source image to use for the generation.""" + + @override + def get_number_expected_results(self) -> int: + """Get the number of expected results.""" + return 1 + + def to_parameters( + self, + *, + result_id: ID_TYPES | None = None, + source_image: bytes | str | None = None, + default_form: KNOWN_ALCHEMY_FORMS | str | None = None, + allocator: ResultIdAllocator | None = None, + seed: str = "alchemy", + ) -> SingleAlchemyParameters: + """Convert this template into concrete alchemy generation parameters.""" + overrides: dict[str, object] = {} + if source_image is not None: + overrides[ALCHEMY_PARAMETER_FIELDS.source_image] = source_image + + current_form = self.form + if default_form is not None and current_form is None: + overrides[ALCHEMY_PARAMETER_FIELDS.form] = default_form + + finalization = finalize_template_for_parameters( + self, + overrides=overrides, + exclude_none=False, + fingerprint_exclude_fields=(ALCHEMY_PARAMETER_FIELDS.result_id,), + ) + + resolved_result_id = resolve_result_id_from_payload( + explicit_id=result_id, + payload_value=finalization.payload.get(ALCHEMY_PARAMETER_FIELDS.result_id), + allocator=allocator, + seed=seed, + fingerprint=finalization.fingerprint, + ) + + finalized_template = finalization.template.model_copy( + update={ALCHEMY_PARAMETER_FIELDS.result_id: resolved_result_id}, + ) + + return self._instantiate_alchemy_parameters(finalized_template) + + @staticmethod + def _instantiate_alchemy_parameters( + payload: SingleAlchemyParametersTemplate | Mapping[str, object], + ) -> SingleAlchemyParameters: + """Instantiate the appropriate alchemy parameter model based on payload contents.""" + return instantiate_alchemy_parameters(payload) + + +class SingleAlchemyParameters(CompositeParametersBase): + """Represents the common bare minimum parameters for any alchemy generation.""" + + result_id: str + """The generation ID to use for the generation.""" + + form: KNOWN_ALCHEMY_FORMS | str + """The form to use for the generation.""" + + source_image: bytes | str | None + """The source image to use for the generation.""" + + @override + def get_number_expected_results(self) -> int: + """Get the number of expected results.""" + return 1 + + +class UpscaleAlchemyParametersTemplate(SingleAlchemyParametersTemplate): + """Template for upscale alchemy parameters 
with all fields optional.""" + + upscaler: KNOWN_UPSCALERS | str | None = None + + +class UpscaleAlchemyParameters(SingleAlchemyParameters): + """Represents the parameters for an upscale alchemy generation.""" + + form: Literal[KNOWN_ALCHEMY_FORMS.post_process] = KNOWN_ALCHEMY_FORMS.post_process + + upscaler: KNOWN_UPSCALERS | str + + +class FacefixAlchemyParametersTemplate(SingleAlchemyParametersTemplate): + """Template for facefix alchemy parameters with all fields optional.""" + + facefixer: KNOWN_FACEFIXERS | str | None = None + + +class FacefixAlchemyParameters(SingleAlchemyParameters): + """Represents the parameters for a facefix alchemy generation.""" + + facefixer: KNOWN_FACEFIXERS | str + + +class InterrogateAlchemyParametersTemplate(SingleAlchemyParametersTemplate): + """Template for interrogate alchemy parameters with all fields optional.""" + + interrogator: KNOWN_INTERROGATORS | str | None = None + + +class InterrogateAlchemyParameters(SingleAlchemyParameters): + """Represents the parameters for an interrogation alchemy generation.""" + + interrogator: KNOWN_INTERROGATORS | str + + +class CaptionAlchemyParametersTemplate(SingleAlchemyParametersTemplate): + """Template for caption alchemy parameters with all fields optional.""" + + caption_model: KNOWN_CAPTION_MODELS | str | None = None + + +class CaptionAlchemyParameters(SingleAlchemyParameters): + """Represents the parameters for a caption alchemy generation.""" + + caption_model: KNOWN_CAPTION_MODELS | str + + +class NSFWAlchemyParametersTemplate(SingleAlchemyParametersTemplate): + """Template for NSFW alchemy parameters with all fields optional.""" + + nsfw_detector: KNOWN_NSFW_DETECTOR | str | None = None + + +class NSFWAlchemyParameters(SingleAlchemyParameters): + """Represents the parameters for a NSFW alchemy generation.""" + + nsfw_detector: KNOWN_NSFW_DETECTOR | str + + +class AlchemyParameters(CompositeParametersBase): + """Represents the parameters for an alchemy generation.""" + + upscalers: list[UpscaleAlchemyParameters] | None = None + """The upscale operations requested.""" + facefixers: list[FacefixAlchemyParameters] | None = None + """The facefix operations requested.""" + interrogators: list[InterrogateAlchemyParameters] | None = None + """The interrogation operations requested.""" + captions: list[CaptionAlchemyParameters] | None = None + """The caption operations requested.""" + nsfw_detectors: list[NSFWAlchemyParameters] | None = None + """The NSFW detection operations requested.""" + + misc_post_processors: list[SingleAlchemyParameters] | None = None + """Any other post-processing operations requested.""" + + _all_alchemy_operations: list[SingleAlchemyParameters] | None = None + """The list of all alchemy operations requested.""" + + @property + def all_alchemy_operations(self) -> list[SingleAlchemyParameters]: + """Get all operations.""" + if self._all_alchemy_operations is not None: + return self._all_alchemy_operations.copy() + + all_operations: list[SingleAlchemyParameters] = [] + if self.upscalers: + all_operations.extend(self.upscalers) + if self.facefixers: + all_operations.extend(self.facefixers) + if self.interrogators: + all_operations.extend(self.interrogators) + if self.captions: + all_operations.extend(self.captions) + if self.nsfw_detectors: + all_operations.extend(self.nsfw_detectors) + if self.misc_post_processors: + all_operations.extend(self.misc_post_processors) + + self._all_alchemy_operations = all_operations + + return all_operations.copy() + + @override + def 
get_number_expected_results(self) -> int: + """Get the number of expected results.""" + return len(self.all_alchemy_operations) + + +Predicate = Callable[[Mapping[str, object]], bool] + + +@dataclass(frozen=True) +class ResolverRule: + """Ordered rule binding a predicate to the concrete parameter model it selects.""" + + predicate: Predicate + model: type[SingleAlchemyParameters] + + +_ALCHEMY_PARAMETER_RULES: list[ResolverRule] = [] + + +def register_alchemy_parameter_rule(rule: ResolverRule, *, append: bool = True) -> None: + """Register a resolver rule that may select a concrete alchemy parameter model.""" + if append: + _ALCHEMY_PARAMETER_RULES.append(rule) + else: + _ALCHEMY_PARAMETER_RULES.insert(0, rule) + + +def unregister_alchemy_parameter_rule(rule: ResolverRule) -> None: + """Remove a previously registered resolver rule.""" + _ALCHEMY_PARAMETER_RULES.remove(rule) + + +def resolve_alchemy_parameter_model( + payload: Mapping[str, object] | SingleAlchemyParametersTemplate, +) -> type[SingleAlchemyParameters]: + """Resolve the concrete alchemy parameter model for the supplied payload.""" + payload_mapping: Mapping[str, object] + if isinstance(payload, CompositeParametersBase): + payload_mapping = payload.model_dump(exclude_none=False) + else: + payload_mapping = payload + for rule in _ALCHEMY_PARAMETER_RULES: + if rule.predicate(payload_mapping): + return rule.model + return SingleAlchemyParameters + + +def instantiate_alchemy_parameters( + payload: Mapping[str, object] | SingleAlchemyParametersTemplate, +) -> SingleAlchemyParameters: + """Instantiate the resolved alchemy parameter model with the given payload.""" + model = resolve_alchemy_parameter_model(payload) + if isinstance(payload, CompositeParametersBase): + return model.model_validate(payload, from_attributes=True) + return model(**dict(payload)) + + +def _register_default_rules() -> None: + def _has_field(field_name: ALCHEMY_PARAMETER_FIELDS) -> Predicate: + return lambda payload: payload.get(field_name) is not None + + def _is_post_process_without_upscaler(payload: Mapping[str, object]) -> bool: + form_value = payload.get(ALCHEMY_PARAMETER_FIELDS.form) + if form_value is None: + return False + if str(form_value) != str(KNOWN_ALCHEMY_FORMS.post_process): + return False + return ALCHEMY_PARAMETER_FIELDS.upscaler not in payload + + default_rules = [ + ResolverRule(predicate=_has_field(ALCHEMY_PARAMETER_FIELDS.upscaler), model=UpscaleAlchemyParameters), + ResolverRule(predicate=_has_field(ALCHEMY_PARAMETER_FIELDS.facefixer), model=FacefixAlchemyParameters), + ResolverRule(predicate=_has_field(ALCHEMY_PARAMETER_FIELDS.interrogator), model=InterrogateAlchemyParameters), + ResolverRule(predicate=_has_field(ALCHEMY_PARAMETER_FIELDS.caption_model), model=CaptionAlchemyParameters), + ResolverRule(predicate=_has_field(ALCHEMY_PARAMETER_FIELDS.nsfw_detector), model=NSFWAlchemyParameters), + ResolverRule(predicate=_is_post_process_without_upscaler, model=UpscaleAlchemyParameters), + ] + + _ALCHEMY_PARAMETER_RULES.extend(default_rules) + + +_register_default_rules() diff --git a/horde_sdk/generation_parameters/generic/__init__.py b/horde_sdk/generation_parameters/generic/__init__.py new file mode 100644 index 00000000..2c2c4855 --- /dev/null +++ b/horde_sdk/generation_parameters/generic/__init__.py @@ -0,0 +1,219 @@ +"""Contains base class definitions and handling for object models of generation parameters. 
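# Illustrative usage sketch: the rule registry above resolves a payload to the
# most specific alchemy parameter model by trying each ResolverRule in order and
# falling back to SingleAlchemyParameters when nothing matches. The subclass and
# field below are hypothetical placeholders, and the import path
# `horde_sdk.generation_parameters.alchemy.object_models` is assumed.
from horde_sdk.generation_parameters.alchemy.object_models import (
    ResolverRule,
    SingleAlchemyParameters,
    UpscaleAlchemyParameters,
    instantiate_alchemy_parameters,
    register_alchemy_parameter_rule,
    resolve_alchemy_parameter_model,
    unregister_alchemy_parameter_rule,
)


class WatermarkAlchemyParameters(SingleAlchemyParameters):
    """Hypothetical operation, used only to illustrate the registry."""

    watermark_text: str


rule = ResolverRule(
    predicate=lambda payload: payload.get("watermark_text") is not None,
    model=WatermarkAlchemyParameters,
)
register_alchemy_parameter_rule(rule, append=False)  # consulted before the default rules

assert resolve_alchemy_parameter_model({"watermark_text": "hi"}) is WatermarkAlchemyParameters

# The default upscaler rule still applies when the custom predicate does not match;
# the source image string here is a placeholder value.
upscale = instantiate_alchemy_parameters(
    {"result_id": "r-1", "source_image": "iVBORw0...", "upscaler": "RealESRGAN_x4plus"},
)
assert isinstance(upscale, UpscaleAlchemyParameters)

unregister_alchemy_parameter_rule(rule)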
+ +See :class:`horde_sdk.generation_parameters.generic.object_models.BasicModelGenerationParameters` for the main +base model for generation parameters. +""" + +from abc import ABC, abstractmethod +from collections.abc import Iterator +from typing import Any, ClassVar, TypeVar + +from pydantic import BaseModel, ConfigDict, Field, RootModel, model_validator + +from horde_sdk.consts import ServiceInfo +from horde_sdk.generation_parameters.generic.consts import UNDERLYING_GENERATION_SCHEME + + +class AbstractGenerationParameter: + """Base class for all generation parameters components. + + These classes represent components of generation parameters that can be combined to form a complete + parameter set. This includes the use of specific auxiliary features such as controlnets, LoRAs, etc. + + You should never instantiate or subclass this class directly. Always use a (pydantic) derived class, such as + `GenerationParameterBaseModel` or `GenerationParameterList`. + """ + + +class SchemaVersionedBaseModel(BaseModel): + """Base model that stamps serialized payloads with a schema version.""" + + SCHEMA_VERSION: ClassVar[str] = "1.0" + """Most recent schema version for this payload.""" + + LEGACY_SCHEMA_VERSION: ClassVar[str] = "1.0" + """Oldest schema version supported for deserialization when unspecified.""" + + schema_version: str = Field( + default="", + description="Schema version recorded when the payload was serialized.", + ) + + model_config = ConfigDict( + use_attribute_docstrings=True, + from_attributes=True, + ) + + @model_validator(mode="before") + @classmethod + def _assign_schema_version(cls, data: Any) -> Any: # noqa: ANN401 + """Populate ``schema_version`` when omitted by callers.""" + if data is None: + return {"schema_version": cls.SCHEMA_VERSION} + + if isinstance(data, dict): + if "schema_version" not in data or not data["schema_version"]: + updated = dict(data) + updated["schema_version"] = cls.SCHEMA_VERSION + return updated + return data + + return data + + @classmethod + def current_schema_version(cls) -> str: + """Return the canonical schema version for newly created instances.""" + return cls.SCHEMA_VERSION + + @classmethod + def legacy_schema_version(cls) -> str: + """Return the version assumed for pre-metadata payloads.""" + return getattr(cls, "LEGACY_SCHEMA_VERSION", cls.SCHEMA_VERSION) + + +class GenerationParameterBaseModel(SchemaVersionedBaseModel, AbstractGenerationParameter): + """Base class for all generation parameters models. + + Contrast this class with `GenerationParameterList`, which is a *collection* of components. + RootModel should be used when a list of the same component type is allowed. For example, use + `GenerationParameterBaseModel` for a single LoRa entry and `GenerationParameterList` + for a list of those LoRa entries. + """ + + underlying_generation_scheme: UNDERLYING_GENERATION_SCHEME | None = None + """The underlying method the generation uses to produce results. + + - If associated with a auxiliary process that is model based, such as a LoRa, this should be set to `MODEL`. + - If instead a service is used to produce results, this should be set to `MODEL_FROM_SERVICE`. + - If there is no generative model involved, and instead a "traditional" algorithm is used, this should be set to + `NON_MODEL_ALGORITHM`. + + Otherwise, if this component is simply a set of parameters that, in itself, does not produce results, this + should be set to `None`. 
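# Illustrative usage sketch: SchemaVersionedBaseModel stamps `schema_version`
# with the class-level SCHEMA_VERSION whenever a payload omits it, so serialized
# payloads always record the schema they were written with. The subclass below
# is a hypothetical example model.
from horde_sdk.generation_parameters.generic import SchemaVersionedBaseModel


class ExamplePayload(SchemaVersionedBaseModel):
    """Hypothetical payload used only for illustration."""

    SCHEMA_VERSION = "2.1"

    value: int = 0


stamped = ExamplePayload(value=3)
assert stamped.schema_version == "2.1"  # filled in by the before-validator

# An explicitly provided version is preserved as-is.
older = ExamplePayload.model_validate({"value": 3, "schema_version": "1.0"})
assert older.schema_version == "1.0"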
+ """ + + +T = TypeVar("T", bound=GenerationParameterBaseModel) +Y = TypeVar("Y", bound=GenerationParameterBaseModel) + + +class GenerationParameterList(RootModel[list[T]], AbstractGenerationParameter): + """Base class for all generation parameters models that represent a collection of components. + + This should be a homogeneous collection of the same type of component. For example, if this is a LoRa + list, all entries should be a `LoRaEntry` instance. Note that this do not prohibit further subclassing + of the contained components, but you should not mix different component types in the same list. A bad + example would be having both `LoRaEntry` and `TIEntry` instances in the same list. + + Contrast this class with `GenerationParameterBaseModel`, which is a single component. + For example, use `GenerationParameterBaseModel` for a single LoRa entry and + `GenerationParameterList` for a list of those LoRa entries. + """ + + model_config = ConfigDict( + use_attribute_docstrings=True, + from_attributes=True, + ) + + root: list[T] = Field(default_factory=list) + + def __iter__(self) -> Iterator[T]: # type: ignore + return iter(self.root) + + def __len__(self) -> int: + """Return the length of the root model's data.""" + return len(self.root) + + def __getitem__(self, index: int) -> T: + """Get an item from the root model's data by index.""" + return self.root[index] + + def get_component_by_type(self, desired_type: type[Y]) -> Y | None: + """Get all components of a specific type. + + Args: + desired_type (type[T]): The type of the component to get. + + Returns: + list[T]: A list of components of the specified type. + """ + if not issubclass(desired_type, GenerationParameterBaseModel): + raise TypeError(f"Expected a subclass of GenerationParameterBaseModel, got {desired_type}") + + components = [component for component in self.root if isinstance(component, desired_type)] + return components[0] if components else None + + def add_component(self, component: T) -> None: + """Add a component to the container. + + Args: + component (T): The component to add. + """ + self.root.append(component) + + +class GenerationWithModelParameters(GenerationParameterBaseModel): + """Represents the common bare minimum parameters for any model-based generative inference or similar.""" + + underlying_generation_scheme: UNDERLYING_GENERATION_SCHEME = UNDERLYING_GENERATION_SCHEME.MODEL + """See :attr:`ComposedParameterSetBase.underlying_generation_scheme` for more information.""" + + model: str | None = None + model_baseline: str | None = None + + +class GenerationByServiceParameters(GenerationParameterBaseModel): + """A base class for service-based generation parameters. + + This class is intended to be used as a base for all service-based generation parameters, which may include + additional fields or methods specific to the service being used. + """ + + underlying_generation_scheme: UNDERLYING_GENERATION_SCHEME = UNDERLYING_GENERATION_SCHEME.MODEL_FROM_SERVICE + """See :attr:`ComposedParameterSetBase.underlying_generation_scheme` for more information.""" + + service: ServiceInfo + """The service to use for the generation, if applicable.""" + + @property + def model_name(self) -> str | None: + """Get the name of the generation model.""" + raise NotImplementedError() # FIXME + + +class CompositeParametersBase(ABC, SchemaVersionedBaseModel): + """Base class for all combined (composed) parameter sets. 
+ + The top level classes which contain BasicModelGenerationParameters instance and/or other specific parameters + should inherit from this class. These classes should always represent complete parameter sets that can be used + for generation. + """ + + model_config = ConfigDict( + use_attribute_docstrings=True, + from_attributes=True, + arbitrary_types_allowed=True, # FIXME + ) + + @abstractmethod + def get_number_expected_results(self) -> int: + """Return the number of expected results for this parameter set. + + Returns: + int: The number of expected results. + """ + + underlying_generation_scheme: UNDERLYING_GENERATION_SCHEME | None = None + """The underlying method the generation uses to produce results. + + Note that this refers only to the top-level generation itself. Component parameters may contain their own + underlying generation schemes. For example, for stable diffusion image generation, this would always be + `MODEL` even if some of the contained components use another scheme such as `MODEL_FROM_SERVICE` or + `NON_MODEL_ALGORITHM`. + + - If the top-level generation is model-based, this should be set to `MODEL`. + - If the top-level generation uses an outside service to produce results, this should be set + to `MODEL_FROM_SERVICE`. + - If the top-level generation does not use a generative model and instead uses a "traditional" algorithm, this + should be set to `NON_MODEL_ALGORITHM`. + """ diff --git a/horde_sdk/generation_parameters/generic/consts.py b/horde_sdk/generation_parameters/generic/consts.py new file mode 100644 index 00000000..11d267e4 --- /dev/null +++ b/horde_sdk/generation_parameters/generic/consts.py @@ -0,0 +1,43 @@ +from enum import auto + +from strenum import StrEnum + +from horde_sdk.consts import KNOWN_SERVICE + + +class UNDERLYING_GENERATION_SCHEME(StrEnum): + """The underlying method the generation uses to produce results. + + In the case that an outside service is used to produce results, this should be set to `MODEL_FROM_SERVICE`. + This is to prevent potential confusion by identifying a service name as the "model". + + Most generative AI is model-based, but certain alchemy or other operations may not have a traditional model + and instead use alternative methods and in this case you can use `NON_MODEL_ALGORITHM` to indicate that. 
+ """ + + MODEL = auto() + + MODEL_FROM_SERVICE = auto() + + NON_MODEL_ALGORITHM = auto() + + +class KNOWN_AUX_MODEL_SOURCE(StrEnum): + """The known sources of an auxiliary model (aux models are LoRas, TIs, etc).""" + + LOCAL = auto() + """The aux model is worker-provided on their local machine.""" + + CIVITAI = auto() + """The aux model is provided by CivitAI.""" + + HORDELING = auto() + """The aux model is provided by the AI-Horde hordeling service.""" + + def map_to_known_service(self) -> KNOWN_SERVICE: + """Map the aux model source to a known service.""" + if self == KNOWN_AUX_MODEL_SOURCE.CIVITAI: + return KNOWN_SERVICE.CIVITAI + if self == KNOWN_AUX_MODEL_SOURCE.HORDELING: + return KNOWN_SERVICE.AI_HORDE + return KNOWN_SERVICE.UNKNOWN diff --git a/horde_sdk/generation_parameters/generic/object_models.py b/horde_sdk/generation_parameters/generic/object_models.py new file mode 100644 index 00000000..da55b3a6 --- /dev/null +++ b/horde_sdk/generation_parameters/generic/object_models.py @@ -0,0 +1,36 @@ +from abc import ABC, abstractmethod +from typing import Any + +from pydantic import BaseModel, ConfigDict, Field + + +class GenerationFeatureFlags(BaseModel): + """Base class for all generation features flags, which represent what support is either required or available.""" + + model_config = ConfigDict( + use_attribute_docstrings=True, + ) + + extra_texts: bool = Field(default=False) + """Whether there is support for extra texts.""" + + extra_source_images: bool = Field(default=False) + """Whether there is support for extra source images.""" + + +class ModelRecordResolver(ABC): + """Abstract base class for classes responsible for resolving model records from a service or data source.""" + + @abstractmethod + def resolve_model_by_name( + self, + model_name: str, + ) -> BaseModel | dict[Any, Any] | None: + """Resolve a model by its name. + + Args: + model_name: The name of the model to resolve. + + Returns: + The resolved model record, or None if not found. 
+ """ diff --git a/horde_sdk/generation_parameters/image/__init__.py b/horde_sdk/generation_parameters/image/__init__.py new file mode 100644 index 00000000..024e478a --- /dev/null +++ b/horde_sdk/generation_parameters/image/__init__.py @@ -0,0 +1,35 @@ +"""Object models which represent the parameters for an image generation.""" + +from horde_sdk.generation_parameters.image.object_models import ( + DEFAULT_BASELINE_RESOLUTION, + HIRES_FIX_DENOISE_STRENGTH_DEFAULT, + AuxModelEntry, + BasicImageGenerationParameters, + ControlnetGenerationParameters, + CustomWorkflowGenerationParameters, + HiresFixGenerationParameters, + Image2ImageGenerationParameters, + ImageGenerationParameters, + LoRaEntry, + RemixGenerationParameters, + RemixImageEntry, + TIEntry, + default_basic_image_generation_parameters, +) + +__all__ = [ + "DEFAULT_BASELINE_RESOLUTION", + "HIRES_FIX_DENOISE_STRENGTH_DEFAULT", + "AuxModelEntry", + "BasicImageGenerationParameters", + "ControlnetGenerationParameters", + "CustomWorkflowGenerationParameters", + "HiresFixGenerationParameters", + "Image2ImageGenerationParameters", + "ImageGenerationParameters", + "LoRaEntry", + "RemixGenerationParameters", + "RemixImageEntry", + "TIEntry", + "default_basic_image_generation_parameters", +] diff --git a/horde_sdk/generation_parameters/image/consts.py b/horde_sdk/generation_parameters/image/consts.py new file mode 100644 index 00000000..8d996c2b --- /dev/null +++ b/horde_sdk/generation_parameters/image/consts.py @@ -0,0 +1,125 @@ +from enum import auto + +from strenum import StrEnum + + +class KNOWN_IMAGE_SAMPLERS(StrEnum): + """The samplers that are known to the API. + + (k_lms, k_heun, DDIM, etc) + """ + + k_lms = auto() + k_heun = auto() + k_euler = auto() + k_euler_a = auto() + k_dpm_2 = auto() + k_dpm_2_a = auto() + k_dpm_fast = auto() + k_dpm_adaptive = auto() + k_dpmpp_2s_a = auto() + k_dpmpp_2m = auto() + dpmsolver = auto() + k_dpmpp_sde = auto() + lcm = auto() + DDIM = "DDIM" + + +class KNOWN_IMAGE_SCHEDULERS(StrEnum): + """The schedulers that are known to the API. + + (normal, karras, exponential, etc) + """ + + normal = auto() + karras = auto() + exponential = auto() + sgm_uniform = auto() + simple = auto() + ddim_uniform = auto() + beta = auto() + linear_quadratic = auto() + kl_optimal = auto() + + +class KNOWN_IMAGE_CONTROLNETS(StrEnum): + """The controlnets that are known to the API.""" + + canny = auto() + hed = auto() + depth = auto() + normal = auto() + openpose = auto() + seg = auto() + scribble = auto() + fakescribbles = auto() + hough = auto() + + +class KNOWN_IMAGE_SOURCE_PROCESSING(StrEnum): + """The source processing methods that are known to the API. 
+
+    (txt2img, img2img, inpainting, etc)
+    """
+
+    txt2img = auto()
+    img2img = auto()
+    inpainting = auto()
+    outpainting = "inpainting"
+    """Outpainting is just inpainting (note that it aliases to the same value)."""
+    remix = auto()
+    """Stable Cascade Remix"""
+
+
+class TI_TRIGGER_INJECT_CHOICE(StrEnum):
+    """The TI trigger inject choices that are known to the API."""
+
+    NO_INJECT = auto()
+    """No injection - the model either does not require it or the user will add the trigger manually."""
+
+    POSITIVE_PROMPT = auto()
+    """Injects into the 'positive' prompt."""
+
+    NEGATIVE_PROMPT = auto()
+    """Injects into the 'negative' prompt."""
+
+
+class LORA_TRIGGER_INJECT_CHOICE(StrEnum):
+    """The LoRa trigger inject choices that are known to the API."""
+
+    NO_INJECT = auto()
+    """No injection - the model either does not require it or the user will add the trigger manually."""
+
+    EXACT_POSITIVE = auto()
+    """Injects the exact specified trigger term into the 'positive' prompt."""
+
+    EXACT_NEGATIVE = auto()
+    """Injects the exact specified trigger term into the 'negative' prompt."""
+
+    FUZZY_POSITIVE = auto()
+    """Attempt to match the specified trigger term to a published trigger term into the 'positive' prompt."""
+
+    FUZZY_NEGATIVE = auto()
+    """Attempt to match the specified trigger term to a published trigger term into the 'negative' prompt."""
+
+
+class KNOWN_IMAGE_WORKFLOWS(StrEnum):
+    """The workflows that are known to the API."""
+
+    qr_code = auto()
+
+
+class CLIP_SKIP_REPRESENTATION(StrEnum):
+    """The CLIP skip representations that are known."""
+
+    NEGATIVE_OFFSET = auto()
+    """CLIP skip is used with a negative offset.
+
+    For example, -1 means "no skipped layers" and -2 means "skip the last layer". This is the case for comfyui.
+    """
+
+    POSITIVE_OFFSET = auto()
+    """CLIP skip is used with a positive offset.
+
+    For example, 1 means "skip no layers" and 2 means "skip the first layer". This is the case for many frontends.
+ """ diff --git a/horde_sdk/generation_parameters/image/object_models.py b/horde_sdk/generation_parameters/image/object_models.py new file mode 100644 index 00000000..c5fbfc9f --- /dev/null +++ b/horde_sdk/generation_parameters/image/object_models.py @@ -0,0 +1,790 @@ +from __future__ import annotations + +from collections.abc import Sequence +from pathlib import Path +from typing import override + +from horde_model_reference.meta_consts import KNOWN_IMAGE_GENERATION_BASELINE +from pydantic import ConfigDict, Field, field_validator, model_validator + +from horde_sdk.consts import ID_TYPES, get_default_frozen_model_config_dict +from horde_sdk.generation_parameters.alchemy import AlchemyParameters +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES +from horde_sdk.generation_parameters.generic import ( + CompositeParametersBase, + GenerationParameterBaseModel, + GenerationWithModelParameters, +) +from horde_sdk.generation_parameters.generic.consts import KNOWN_AUX_MODEL_SOURCE +from horde_sdk.generation_parameters.generic.object_models import GenerationFeatureFlags +from horde_sdk.generation_parameters.image.consts import ( + CLIP_SKIP_REPRESENTATION, + KNOWN_IMAGE_CONTROLNETS, + KNOWN_IMAGE_SAMPLERS, + KNOWN_IMAGE_SCHEDULERS, + KNOWN_IMAGE_SOURCE_PROCESSING, + KNOWN_IMAGE_WORKFLOWS, + LORA_TRIGGER_INJECT_CHOICE, + TI_TRIGGER_INJECT_CHOICE, +) +from horde_sdk.generation_parameters.utils import ( + ResultIdAllocator, + finalize_template_for_parameters, + resolve_result_ids_from_payload, +) + +DEFAULT_BASELINE_RESOLUTION: int = 512 +"""The default assumed (single side) trained resolution for image generation models if unspecified.""" +HIRES_FIX_DENOISE_STRENGTH_DEFAULT: float = 0.65 +"""The default second-pass denoise strength for hires-fix generations.""" + + +class ControlnetFeatureFlags(GenerationParameterBaseModel): + """Feature flags for controlnet.""" + + model_config = get_default_frozen_model_config_dict() + + controlnets: list[KNOWN_IMAGE_CONTROLNETS | str] = Field( + examples=[ + [KNOWN_IMAGE_CONTROLNETS.canny], + [KNOWN_IMAGE_CONTROLNETS.canny, KNOWN_IMAGE_CONTROLNETS.depth], + ], + ) + """The controlnets supported by the worker.""" + + image_is_control: bool = Field(default=False) + """Whether there is support for passing a pre-parsed control image.""" + + return_control_map: bool = Field(default=False) + """Whether there is support returning the control map.""" + + +class ImageGenerationFeatureFlags(GenerationFeatureFlags): + """Feature flags for an image worker.""" + + baselines: list[KNOWN_IMAGE_GENERATION_BASELINE | str] = Field( + examples=[ + [KNOWN_IMAGE_GENERATION_BASELINE.infer], + [KNOWN_IMAGE_GENERATION_BASELINE.infer, KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_1], + ], + ) + """The baselines supported for standard image generation. + + If `infer`, the worker will attempt to infer the model type from the model name. 
+ """ + + clip_skip: bool = Field(default=False) + """Whether there is support for clip skipping.""" + + hires_fix: bool = Field(default=False) + """Whether there is support for hires fix.""" + + tiling: bool = Field(default=False) + """Whether there is support for seamless tiling.""" + + schedulers: list[KNOWN_IMAGE_SCHEDULERS | str] = Field( + examples=[ + [KNOWN_IMAGE_SCHEDULERS.normal], + [KNOWN_IMAGE_SCHEDULERS.normal, KNOWN_IMAGE_SCHEDULERS.simple], + ], + ) + """The schedulers supported.""" + + samplers: list[KNOWN_IMAGE_SAMPLERS | str] = Field( + examples=[ + [KNOWN_IMAGE_SAMPLERS.k_euler], + [KNOWN_IMAGE_SAMPLERS.k_lms, KNOWN_IMAGE_SAMPLERS.k_euler], + ], + ) + """The samplers supported.""" + + controlnets_feature_flags: ControlnetFeatureFlags | None = Field( + default=None, + examples=[ + ControlnetFeatureFlags( + controlnets=[KNOWN_IMAGE_CONTROLNETS.canny], + image_is_control=False, + return_control_map=False, + ), + ControlnetFeatureFlags( + controlnets=[KNOWN_IMAGE_CONTROLNETS.canny, KNOWN_IMAGE_CONTROLNETS.depth], + image_is_control=True, + return_control_map=True, + ), + ], + ) + """The controlnet feature flags for the worker.""" + + post_processing: list[KNOWN_ALCHEMY_TYPES | str] | None = Field( + default=None, + examples=[ + [KNOWN_ALCHEMY_TYPES.RealESRGAN_x4plus], + [KNOWN_ALCHEMY_TYPES.RealESRGAN_x4plus, KNOWN_ALCHEMY_TYPES.GFPGAN], + ], + ) + """The post processing methods.""" + + source_processing: list[KNOWN_IMAGE_SOURCE_PROCESSING | str] = Field( + examples=[ + [KNOWN_IMAGE_SOURCE_PROCESSING.txt2img], + [KNOWN_IMAGE_SOURCE_PROCESSING.txt2img, KNOWN_IMAGE_SOURCE_PROCESSING.img2img], + [ + KNOWN_IMAGE_SOURCE_PROCESSING.txt2img, + KNOWN_IMAGE_SOURCE_PROCESSING.img2img, + KNOWN_IMAGE_SOURCE_PROCESSING.inpainting, + ], + ], + ) + """The source processing methods.""" + + workflows: list[KNOWN_IMAGE_WORKFLOWS | str] | None = Field( + default=None, + examples=[ + [KNOWN_IMAGE_WORKFLOWS.qr_code], + ], + ) + """The workflows supported.""" + + tis: list[KNOWN_AUX_MODEL_SOURCE | str] | None = Field( + default=None, + examples=[ + [KNOWN_AUX_MODEL_SOURCE.HORDELING], + [KNOWN_AUX_MODEL_SOURCE.LOCAL], + ], + ) + """If textual inversions are supported, the sources of the textual inversions supported.""" + + loras: list[KNOWN_AUX_MODEL_SOURCE | str] | None = Field( + default=None, + examples=[ + [KNOWN_AUX_MODEL_SOURCE.CIVITAI], + [KNOWN_AUX_MODEL_SOURCE.LOCAL], + ], + ) + + """If loras are supported, the sources of the loras supported.""" + + @field_validator("baselines") + @classmethod + def ensure_baseline_non_empty( + cls, + v: list[KNOWN_IMAGE_GENERATION_BASELINE | str], + ) -> list[KNOWN_IMAGE_GENERATION_BASELINE | str]: + """Ensure that the baselines are not empty.""" + if not v: + raise ValueError("Baselines cannot be empty.") + return v + + +class BasicImageGenerationParametersTemplate(GenerationWithModelParameters): + """Represents the common parameters for an image generation.""" + + prompt: str | None = None + """The prompt to use for the generation.""" + seed: str | None = None + """The seed to use for the generation.""" + + height: int | None = Field( + default=None, + multiple_of=64, + ge=64, + examples=[512, 768], + ) + """The height to use for the generation.""" + width: int | None = Field( + default=None, + multiple_of=64, + ge=64, + examples=[512, 768], + ) + """The width to use for the generation.""" + + steps: int | None = Field( + default=None, + ge=1, + examples=[4, 20, 50], + ) + """The number of steps to use for the generation.""" + + cfg_scale: float | None 
= Field( + default=None, + ge=0, + examples=[0.0, 1.0, 7.0], + ) + """The scale to use for the generation.""" + + sampler_name: KNOWN_IMAGE_SAMPLERS | str | None = Field( + default=None, + examples=[KNOWN_IMAGE_SAMPLERS.k_lms], + ) + """The sampler to use for the generation.""" + + scheduler: KNOWN_IMAGE_SCHEDULERS | str | None = Field( + default=None, + examples=[KNOWN_IMAGE_SCHEDULERS.normal], + ) + """The scheduler to use for the generation.""" + + clip_skip: int | None = Field( + default=None, + examples=[-3, -2, -1, 1, 2, 3], + ) + """The offset of layer numbers to skip. Be sure to check `clip_skip_representation` for the representation.""" + + clip_skip_representation: CLIP_SKIP_REPRESENTATION | None = Field( + default=None, + ) + """The representation of the clip skip. See `CLIP_SKIP_REPRESENTATION` for more information. + + Typically front-ends use positive values, while comfyui used the same value but negative. + """ + + denoising_strength: float | None = Field( + default=None, + ge=0, + le=1, + examples=[0.0, 0.5, 1.0], + ) + """The denoising strength to use for the generation.""" + + tiling: bool | None = None + """If true, the generation will be generated with seamless tiling.""" + + +class BasicImageGenerationParameters(BasicImageGenerationParametersTemplate): + """Represents the common bare minimum parameters for an image generation.""" + + model_config = ConfigDict( + frozen=True, + from_attributes=True, + ) + + model: str + """The model to use for the generation.""" + + prompt: str + """The prompt to use for the generation.""" + + height: int | None = Field( + default=DEFAULT_BASELINE_RESOLUTION, + multiple_of=64, + ge=64, + examples=[512, 768], + ) + """The height to use for the generation.""" + width: int | None = Field( + default=DEFAULT_BASELINE_RESOLUTION, + multiple_of=64, + ge=64, + examples=[512, 768], + ) + """The width to use for the generation.""" + + clip_skip_representation: CLIP_SKIP_REPRESENTATION | None = Field( + default=CLIP_SKIP_REPRESENTATION.NEGATIVE_OFFSET, + ) + """The representation of the clip skip. See `CLIP_SKIP_REPRESENTATION` for more information. + + Typically front-ends use positive values, while comfyui used the same value but negative. 
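# Illustrative usage sketch: `clip_skip` is interpreted through
# `clip_skip_representation`, and the concrete model defaults to the
# negative-offset (comfyui-style) convention, so a typical front-end value of 2
# would be expressed as -2 here. The model name and prompt are placeholders.
from horde_sdk.generation_parameters.image import BasicImageGenerationParameters
from horde_sdk.generation_parameters.image.consts import CLIP_SKIP_REPRESENTATION

params = BasicImageGenerationParameters(
    model="example-model",
    prompt="a lighthouse at dusk",
    clip_skip=-2,
    clip_skip_representation=CLIP_SKIP_REPRESENTATION.NEGATIVE_OFFSET,
)
assert params.clip_skip_representation == CLIP_SKIP_REPRESENTATION.NEGATIVE_OFFSET
assert params.height == params.width == 512  # DEFAULT_BASELINE_RESOLUTION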
+ """ + + +default_basic_image_generation_parameters = BasicImageGenerationParameters( + prompt="EXAMPLE_PROMPT", + model="EXAMPLE_MODEL", + model_baseline="infer", + seed="1", + height=DEFAULT_BASELINE_RESOLUTION, + width=DEFAULT_BASELINE_RESOLUTION, + steps=20, + cfg_scale=7.0, + sampler_name=KNOWN_IMAGE_SAMPLERS.k_lms, + scheduler=KNOWN_IMAGE_SCHEDULERS.normal, + clip_skip=1, + denoising_strength=0.75, +) + + +class Image2ImageGenerationParameters(GenerationParameterBaseModel): + """Represents the parameters for an image-to-image generation.""" + + source_image: bytes | str | None + """The source image to use for the generation.""" + source_mask: bytes | str | None + """The source mask to use for the generation.""" + + +class RemixImageEntry(GenerationParameterBaseModel): + """Represents a special image entry for a generation.""" + + image: bytes | str + """The image data.""" + + strength: float = 1.0 + """The weight to apply this image to the remix generation.""" + + +class RemixGenerationParameters(GenerationParameterBaseModel): + """Represents the parameters for a stable cascade remix generation.""" + + source_image: bytes | str + """The source image to use for the generation.""" + + remix_images: list[RemixImageEntry] + """The images to remix the source image with.""" + + +class ControlnetGenerationParameters(GenerationParameterBaseModel): + """Represents the parameters for a controlnet generation.""" + + controlnet_type: KNOWN_IMAGE_CONTROLNETS | str + """The type of controlnet to use for the generation.""" + + source_image: bytes | str | None + """The source image to use for the generation, if img2img.""" + control_map: bytes | str | None + """The control map to use for the generation, if img2img.""" + + return_control_map: bool = False + """If true, return the control map created by the controlnet pre-processor.""" + + +class HiresFixGenerationParameters(GenerationParameterBaseModel): + """Represents the parameters for a high-resolution fix generation.""" + + first_pass: BasicImageGenerationParameters + second_pass: BasicImageGenerationParameters + + +class AuxModelEntry(GenerationParameterBaseModel): + """Represents a single entry of an aux model, (LoRas, TIs, etc).""" + + name: str | None + """The name of the aux model. If this is a hosted aux model, the name to search for. See `remote_version_id` if + targeting a specific version of a hosted aux model.""" + release_version: str | None = None + """The version of the aux model. This is v1, v2, etc. If this is a hosted aux model, you should instead use + `remote_version_id` and reference the platform-specific file identifier.""" + remote_version_id: str | None + """If this aux model is sourced from a website/API, the version ID specific to that website/API + to identify the specific version of the aux model. This is *not* v1, v2, but a numeric ID that the + service assigns and is typically in the URL of the download link.""" + + source: KNOWN_AUX_MODEL_SOURCE | str + """The source of the aux model. This can be a known source or a custom source.""" + + remote_url: str | None = None + """The remote URL to download the aux model from.""" + local_filename: Path | None = None + """The local filename to load the aux model from.""" + file_hash: str | None = None + """The hash of the aux model file.""" + + model_strength: float = 1 + """The strength of the aux model on the generation model. 
1 is the default strength.""" + + @model_validator(mode="after") + def verify_identifier_set(self: AuxModelEntry) -> AuxModelEntry: + """Ensure that at least one of name, version, or remote_version_id is provided.""" + if self.name is None and self.release_version is None and self.remote_version_id is None: + raise ValueError("At least one of name, version, or remote_version_id must be provided.") + + return self + + +class LoRaEntry(AuxModelEntry): + """Represents a single entry of a LoRa.""" + + clip_strength: float = 1 + """The strength of the LoRa on the clip model. 1 is the default strength.""" + + lora_triggers: list[str] | None = None + """The triggers to use for the LoRa. Specify the behavior with `lora_inject_trigger_choice`.""" + + lora_inject_trigger_choice: LORA_TRIGGER_INJECT_CHOICE = LORA_TRIGGER_INJECT_CHOICE.NO_INJECT + """If true and if supported by the backend, inject a trigger term into the prompt.""" + + +class TIEntry(AuxModelEntry): + """Represents a single entry of a Textual Inversion.""" + + ti_inject_trigger_choice: TI_TRIGGER_INJECT_CHOICE = TI_TRIGGER_INJECT_CHOICE.NO_INJECT + """If true and if supported by the backend, inject a trigger term into the prompt.""" + + +class CustomWorkflowGenerationParameters(GenerationParameterBaseModel): + """Represents the parameters for a custom workflow generation.""" + + custom_workflow_name: KNOWN_IMAGE_WORKFLOWS | str + """The name of the custom workflow to use for the generation.""" + custom_workflow_version: str | None = None + """The version of the custom workflow to use for the generation. \ + If None, the latest version will be used. Defaults to None.""" + + custom_parameters: dict[ID_TYPES, str] | None = None + """The custom parameters to use for the generation. Defaults to None.""" + + +class ImageGenerationComponentContainer(GenerationParameterBaseModel): + """Container for optional image generation components. + + This container holds auxiliary components for image generation such as LoRa entries, + Textual Inversion entries, ControlNet parameters, and more. It uses a simple list + that naturally supports multiple instances of the same component type. + """ + + model_config = ConfigDict( + use_attribute_docstrings=True, + from_attributes=True, + ) + + components: list[ + Image2ImageGenerationParameters + | RemixGenerationParameters + | ControlnetGenerationParameters + | HiresFixGenerationParameters + | LoRaEntry + | TIEntry + | CustomWorkflowGenerationParameters + ] = Field(default_factory=list) + """The list of generation components.""" + + def add( + self, + component: ( + Image2ImageGenerationParameters + | RemixGenerationParameters + | ControlnetGenerationParameters + | HiresFixGenerationParameters + | LoRaEntry + | TIEntry + | CustomWorkflowGenerationParameters + ), + ) -> None: + """Add a component to the container. + + Args: + component: The component to add. + """ + self.components.append(component) + + def add_all( + self, + components: list[ + Image2ImageGenerationParameters + | RemixGenerationParameters + | ControlnetGenerationParameters + | HiresFixGenerationParameters + | LoRaEntry + | TIEntry + | CustomWorkflowGenerationParameters + ], + ) -> None: + """Add multiple components to the container. + + Args: + components: The list of components to add. 
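# Illustrative usage sketch: the component container is a flat, mixed list of
# optional generation components; add() appends one entry, add_all() appends
# several, and the typed accessors defined below pick them back out. The LoRa
# name and version ID used here are placeholders.
from horde_sdk.generation_parameters.generic.consts import KNOWN_AUX_MODEL_SOURCE
from horde_sdk.generation_parameters.image.object_models import (
    ImageGenerationComponentContainer,
    LoRaEntry,
)

container = ImageGenerationComponentContainer()
container.add(
    LoRaEntry(
        name="example-lora",
        remote_version_id="123456",
        source=KNOWN_AUX_MODEL_SOURCE.CIVITAI,
    ),
)
assert len(container.components) == 1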
+ """ + self.components.extend(components) + + @property + def image2image_params(self) -> Image2ImageGenerationParameters | None: + """Get the image-to-image parameters if they exist.""" + for component in self.components: + if isinstance(component, Image2ImageGenerationParameters): + return component + return None + + @property + def remix_params(self) -> RemixGenerationParameters | None: + """Get the remix parameters if they exist.""" + for component in self.components: + if isinstance(component, RemixGenerationParameters): + return component + return None + + @property + def controlnet_params(self) -> ControlnetGenerationParameters | None: + """Get the controlnet parameters if they exist.""" + for component in self.components: + if isinstance(component, ControlnetGenerationParameters): + return component + return None + + @property + def hires_fix_params(self) -> HiresFixGenerationParameters | None: + """Get the hires fix parameters if they exist.""" + for component in self.components: + if isinstance(component, HiresFixGenerationParameters): + return component + return None + + @property + def lora_entries(self) -> list[LoRaEntry]: + """Get all LoRa entries.""" + return [c for c in self.components if isinstance(c, LoRaEntry)] + + @property + def ti_entries(self) -> list[TIEntry]: + """Get all Textual Inversion entries.""" + return [c for c in self.components if isinstance(c, TIEntry)] + + @property + def custom_workflow_entries(self) -> list[CustomWorkflowGenerationParameters]: + """Get all custom workflow entries.""" + return [c for c in self.components if isinstance(c, CustomWorkflowGenerationParameters)] + + @property + def lora_params(self) -> list[LoRaEntry]: + """Get all LoRa entries. + + Deprecated: Use lora_entries instead. This property returns a plain list instead of LoRaEntries wrapper. + """ + return self.lora_entries + + @property + def ti_params(self) -> list[TIEntry]: + """Get all Textual Inversion entries. + + Deprecated: Use ti_entries instead. This property returns a plain list instead of TIEntries wrapper. + """ + return self.ti_entries + + @property + def custom_workflows_params(self) -> list[CustomWorkflowGenerationParameters]: + """Get all custom workflow entries. + + Deprecated: Use custom_workflow_entries instead. This property returns a plain list instead of + CustomWorkflows wrapper. + """ + return self.custom_workflow_entries + + +class ImageGenerationParametersTemplate(CompositeParametersBase): + """Represents the parameters for an image generation.""" + + batch_size: int | None = Field(default=None, ge=1) + """The number of images to generated batched (simultaneously). This is the `n_iter` parameter in ComfyUI""" + + source_processing: KNOWN_IMAGE_SOURCE_PROCESSING | str | None = None + """txt2img, img2img, etc. See `KNOWN_IMAGE_SOURCE_PROCESSING` for more information.""" + + base_params: BasicImageGenerationParametersTemplate | None = None + """The base parameters for the generation.""" + + additional_params: ImageGenerationComponentContainer | None = None + """Additional parameters for the generation. 
This can include parameters for img2img, remix, controlnet, hires fix, + and custom workflows.""" + + alchemy_params: AlchemyParameters | None = None + """If alchemy is also requested, the parameters specific to those operations.""" + + @model_validator(mode="after") + def verify_source_processing(self: ImageGenerationParametersTemplate) -> ImageGenerationParametersTemplate: + """Ensure that the appropriate parameters are set based on the source processing type.""" + if self.source_processing in [ + KNOWN_IMAGE_SOURCE_PROCESSING.img2img, + KNOWN_IMAGE_SOURCE_PROCESSING.inpainting, + KNOWN_IMAGE_SOURCE_PROCESSING.outpainting, + ]: + if self.additional_params is None: + raise ValueError("additional_params must be provided for img2img or inpainting source processing.") + + if self.additional_params.image2image_params is None: + raise ValueError("img2img_params must be provided for img2img source processing.") + elif self.source_processing == KNOWN_IMAGE_SOURCE_PROCESSING.remix: + if self.additional_params is None: + raise ValueError("additional_params must be provided for remix source processing.") + + if self.additional_params.remix_params is None: + raise ValueError("remix_params must be provided for remix source processing.") + + return self + + @override + def get_number_expected_results(self: ImageGenerationParametersTemplate) -> int: + """Return the number of expected results for this parameter set. + + Returns: + int: The number of expected results. + """ + return self.batch_size if self.batch_size is not None else 1 + + def to_parameters( + self, + *, + base_param_updates: BasicImageGenerationParametersTemplate | None = None, + additional_param_updates: ImageGenerationComponentContainer | None = None, + result_ids: Sequence[ID_TYPES] | None = None, + allocator: ResultIdAllocator | None = None, + seed: str = "image", + ) -> ImageGenerationParameters: + """Convert this template into concrete image generation parameters.""" + base_params = self.base_params + if base_params is None: + raise ValueError("Image generation templates must define base_params before conversion.") + + overrides: dict[str, object] | None = None + if base_param_updates: + overrides = { + "base_params": base_params.model_copy(update=base_param_updates.model_dump(exclude_none=True)), + } + + if additional_param_updates: + if overrides is None: + overrides = {} + if not self.additional_params: + raise ValueError("additional_params must be defined before applying updates.") + overrides["additional_params"] = self.additional_params.model_copy(update=dict(additional_param_updates)) + + finalization = finalize_template_for_parameters( + self, + overrides=overrides, + exclude_none=False, + fingerprint_exclude_fields=("result_ids",), + ) + + finalized_template = finalization.template + resolved_base_params = finalized_template.base_params + if resolved_base_params is None: + raise ValueError("Image generation templates must define base_params before conversion.") + + batch_size = finalized_template.batch_size or 1 + + resolved_result_ids = resolve_result_ids_from_payload( + explicit_ids=result_ids, + payload_value=finalization.payload.get("result_ids"), + count=batch_size, + allocator=allocator, + seed=seed, + fingerprint=finalization.fingerprint, + ) + + concrete_base_params = BasicImageGenerationParameters.model_validate( + resolved_base_params, + from_attributes=True, + ) + + resolved_additional_params = ( + finalized_template.additional_params + if finalized_template.additional_params is not None + else 
ImageGenerationComponentContainer() + ) + + parameter_payload = finalized_template.model_copy( + update={ + "base_params": concrete_base_params, + "result_ids": resolved_result_ids, + "additional_params": resolved_additional_params, + "batch_size": batch_size, + }, + ) + + return ImageGenerationParameters.model_validate( + parameter_payload, + from_attributes=True, + ) + + +class ImageGenerationParameters(ImageGenerationParametersTemplate): + """Represents the common bare-minimum parameters for an image generation.""" + + result_ids: list[ID_TYPES] + """The IDs to assign to the resulting images.""" + + base_params: BasicImageGenerationParameters + """The base parameters for the generation.""" + + additional_params: ImageGenerationComponentContainer = Field(default_factory=ImageGenerationComponentContainer) + """Additional parameters for the generation. This can include parameters for img2img, remix, controlnet, hires fix, + and custom workflows.""" + + batch_size: int | None = Field(default=1, ge=1) + """The number of images to generated batched (simultaneously, not concurrently). + This is the `n_iter` parameter in ComfyUI""" + + @model_validator(mode="after") + def verify_id_count(self: ImageGenerationParameters) -> ImageGenerationParameters: + """Ensure that at least one generation ID is provided.""" + if not self.result_ids: + raise ValueError("At least one generation ID must be provided.") + + if len(self.result_ids) != self.batch_size: + raise ValueError("The number of generation IDs must match the batch size.") + + return self + + +def image_parameters_to_feature_flags( + parameters: ImageGenerationParametersTemplate, +) -> ImageGenerationFeatureFlags: + """Create a feature flag object representing the features used in the parameters.""" + all_alchemy_forms = None + if parameters.alchemy_params is not None and parameters.alchemy_params._all_alchemy_operations is not None: + all_alchemy_forms = [x.form for x in parameters.alchemy_params._all_alchemy_operations if x.form is not None] + + baselines: list[KNOWN_IMAGE_GENERATION_BASELINE | str] + if parameters.base_params and parameters.base_params.model_baseline is not None: + baselines = [parameters.base_params.model_baseline] + else: + baselines = [KNOWN_IMAGE_GENERATION_BASELINE.infer] + + schedulers = [] + samplers = [] + + if parameters.base_params is not None: + if parameters.base_params.scheduler is not None: + schedulers.append(parameters.base_params.scheduler) + if parameters.base_params.sampler_name is not None: + samplers.append(parameters.base_params.sampler_name) + + source_processing = [parameters.source_processing] if parameters.source_processing is not None else [] + + post_processing = all_alchemy_forms + + tiling = bool(parameters.base_params and parameters.base_params.tiling) + + hires_fix = False + controlnets_feature_flags = None + workflows: list[str] | None = None + tis: list[KNOWN_AUX_MODEL_SOURCE | str] | None = None + loras: list[KNOWN_AUX_MODEL_SOURCE | str] | None = None + + if parameters.additional_params: + hires_fix = parameters.additional_params.hires_fix_params is not None + + controlnets_feature_flags = ( + ControlnetFeatureFlags( + controlnets=[parameters.additional_params.controlnet_params.controlnet_type], + image_is_control=parameters.additional_params.controlnet_params.source_image is not None, + return_control_map=parameters.additional_params.controlnet_params.return_control_map, + ) + if parameters.additional_params.controlnet_params is not None + else None + ) + + if 
parameters.additional_params.custom_workflows_params: + workflow_names = [wf.custom_workflow_name for wf in parameters.additional_params.custom_workflows_params] + workflows = workflow_names or None + + if parameters.additional_params.ti_params: + ti_names = [ti.name for ti in parameters.additional_params.ti_params if ti.name is not None] + tis = ti_names or None + + if parameters.additional_params.lora_params: + lora_names = [lora.name for lora in parameters.additional_params.lora_params if lora.name is not None] + loras = lora_names or None + + return ImageGenerationFeatureFlags( + baselines=baselines, + hires_fix=hires_fix, + tiling=tiling, + schedulers=schedulers, + samplers=samplers, + controlnets_feature_flags=controlnets_feature_flags, + post_processing=post_processing, + source_processing=source_processing, + workflows=workflows, + tis=tis, + loras=loras, + ) diff --git a/horde_sdk/generation_parameters/multimodal/__init__.py b/horde_sdk/generation_parameters/multimodal/__init__.py new file mode 100644 index 00000000..067ac056 --- /dev/null +++ b/horde_sdk/generation_parameters/multimodal/__init__.py @@ -0,0 +1 @@ +"""Multimodal generation parameters object models and helpers.""" diff --git a/horde_sdk/generation_parameters/templates/__init__.py b/horde_sdk/generation_parameters/templates/__init__.py new file mode 100644 index 00000000..89ff2970 --- /dev/null +++ b/horde_sdk/generation_parameters/templates/__init__.py @@ -0,0 +1 @@ +"""Template generation parameters object models and helpers.""" diff --git a/horde_sdk/generation_parameters/text/__init__.py b/horde_sdk/generation_parameters/text/__init__.py new file mode 100644 index 00000000..6164da08 --- /dev/null +++ b/horde_sdk/generation_parameters/text/__init__.py @@ -0,0 +1,15 @@ +"""Object models which represent the parameters for a text generation.""" + +from horde_sdk.generation_parameters.text.object_models import ( + BasicTextGenerationFormatParameters, + BasicTextGenerationParameters, + KoboldAITextGenerationParameters, + TextGenerationParameters, +) + +__all__ = [ + "BasicTextGenerationFormatParameters", + "BasicTextGenerationParameters", + "KoboldAITextGenerationParameters", + "TextGenerationParameters", +] diff --git a/horde_sdk/generation_parameters/text/consts.py b/horde_sdk/generation_parameters/text/consts.py new file mode 100644 index 00000000..e69de29b diff --git a/horde_sdk/generation_parameters/text/object_models.py b/horde_sdk/generation_parameters/text/object_models.py new file mode 100644 index 00000000..756fe5e0 --- /dev/null +++ b/horde_sdk/generation_parameters/text/object_models.py @@ -0,0 +1,301 @@ +from __future__ import annotations + +from collections.abc import Sequence +from enum import auto +from typing import Self, override + +from pydantic import Field +from strenum import StrEnum + +from horde_sdk import get_default_frozen_model_config_dict +from horde_sdk.consts import ID_TYPES +from horde_sdk.generation_parameters.generic import ( + CompositeParametersBase, + GenerationParameterBaseModel, + GenerationWithModelParameters, +) +from horde_sdk.generation_parameters.generic.object_models import GenerationFeatureFlags +from horde_sdk.generation_parameters.utils import ( + ResultIdAllocator, + TemplateFinalization, + finalize_template_for_parameters, + resolve_result_ids_from_payload, +) + + +class FormatImplementationStandard(StrEnum): + """The standards for format implementations.""" + + KOBOLD_AI = auto() + """The KoboldAI standard for a format implementation.""" + + +class 
FormatFeatureFlags(GenerationFeatureFlags): + """Represents the feature flags for text generation formatting.""" + + format_implementation_standard: FormatImplementationStandard | None = Field( + default=None, + examples=[ + FormatImplementationStandard.KOBOLD_AI, + ], + ) + """The standard for format implementations.""" + + leading_space_to_input_when_missing: bool = Field( + default=False, + examples=[ + False, + ], + ) + """Input formatting option. When enabled, adds a leading space to your input if there is no trailing whitespace at + the end of the previous action.""" + + remove_consecutive_newlines: bool = Field( + default=False, + examples=[ + False, + ], + ) + """Output formatting option. When enabled, replaces all occurrences of two or more consecutive newlines in the + output with one newline.""" + + remove_special_characters: bool = Field( + default=False, + examples=[ + False, + ], + ) + """Output formatting option. When enabled, removes #/@%}{+=~|\\^<> from the output.""" + + remove_end_of_sentence: bool = Field( + default=False, + examples=[ + False, + ], + ) + """Output formatting option. When enabled, removes some characters from the end of the output such that the output + doesn't end in the middle of a sentence. If the output is less than one sentence long, does nothing.""" + + remove_after_first_line: bool = Field( + default=False, + examples=[ + False, + ], + ) + """Output formatting option. When enabled, removes everything after the first line of the output, including the + newline.""" + + +class BasicTextGenerationFormatParameters(GenerationParameterBaseModel): # TODO: Better field names + """Represent common text generation formatting parameters.""" + + frmtadsnsp: bool | None = Field( + default=None, + examples=[ + False, + ], + ) + """Input formatting option. When enabled, adds a leading space to your input if there is no trailing whitespace at + the end of the previous action.""" + frmtrmblln: bool | None = Field( + default=None, + examples=[ + False, + ], + ) + """Output formatting option. When enabled, replaces all occurrences of two or more consecutive newlines in the + output with one newline.""" + frmtrmspch: bool | None = Field( + default=None, + examples=[ + False, + ], + ) + """Output formatting option. When enabled, removes #/@%}{+=~|\\^<> from the output.""" + frmttriminc: bool | None = Field( + default=None, + examples=[ + False, + ], + ) + """Output formatting option. When enabled, removes some characters from the end of the output such that the output + doesn't end in the middle of a sentence. If the output is less than one sentence long, does nothing.""" + singleline: bool | None = Field( + default=None, + examples=[ + False, + ], + ) + """Output formatting option. 
When enabled, removes everything after the first line of the output, including the + newline.""" + + +class BasicTextGenerationParametersTemplate(GenerationWithModelParameters): # TODO: Non-AI-Horde specific constraints + """Represents the common parameters for a text generation.""" + + prompt: str | None = None + """The prompt to use for the generation.""" + + soft_prompt: str | None = None + """The soft prompt to use for the generation.""" + + max_context_length: int | None = Field( + default=None, + ge=80, + le=32000, + ) + """Maximum number of tokens to send to the model.""" + + max_length: int | None = Field(default=None, ge=16, le=1024) + """Number of tokens to generate.""" + + stop_sequence: list[str] | None = None + """The stop sequences to use for the generation.""" + + temperature: float | None = Field(default=None, ge=0.0, le=5.0) + """Temperature value.""" + + dynamic_temp_exponent: float | None = Field(default=None, ge=0.0, le=5.0) + """Dynamic temperature exponent value.""" + dynamic_temp_range: float | None = Field(default=None, ge=0.0, le=5.0) + """Dynamic temperature range value.""" + + tfs: float | None = Field(default=None, ge=0.0, le=1.0) + """Tail free sampling value.""" + typical: float | None = Field(default=None, ge=0.0, le=1.0) + """Typical sampling value.""" + sampler_order: list[int] | None = None + """The sampler order to use for the generation.""" + smoothing_factor: float | None = Field(default=None, ge=0.0, le=10.0) + """Quadratic sampling value.""" + + top_a: float | None = Field(default=None, ge=0.0, le=1.0) + """Top-a sampling value.""" + top_k: int | None = Field(default=None, ge=0, le=100) + """Top-k sampling value.""" + top_p: float | None = Field(default=None, ge=0.001, le=1.0) + """Top-p sampling value.""" + + min_p: float | None = Field(default=None, ge=0.0, le=1.0) + """Min-p sampling value.""" + rep_pen: float | None = Field(default=None, ge=1.0, le=3.0) + """Base repetition penalty value.""" + rep_pen_range: int | None = Field(default=None, ge=0, le=4096) + """Repetition penalty range.""" + rep_pen_slope: float | None = Field(default=None, ge=0.0, le=10.0) + """Repetition penalty slope.""" + + +class BasicTextGenerationParameters(BasicTextGenerationParametersTemplate): # TODO: Non-AI-Horde specific constraints + """Represents the common bare-minimum parameters for a text generation.""" + + model_config = get_default_frozen_model_config_dict() + + model: str + """The model to use for the generation.""" + + prompt: str # pyright: ignore[reportGeneralTypeIssues, reportIncompatibleVariableOverride] + """The prompt to use for the generation.""" + + +class TextGenerationParametersTemplate(CompositeParametersBase): + """Represents the parameters for a text generation.""" + + base_params: BasicTextGenerationParametersTemplate | None = None + """The basic text generation parameters for the generation.""" + + format_params: BasicTextGenerationFormatParameters | None = None + """The text generation formatting parameters.""" + + @override + def get_number_expected_results(self) -> int: + """Return the number of expected results for this parameter set. + + Returns: + int: The number of expected results. 
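# Illustrative usage sketch: a text template pairs the sampling/length settings
# with the formatting toggles, and to_parameters() (defined just below) turns it
# into concrete TextGenerationParameters. The model name and prompt are placeholders.
from horde_sdk.generation_parameters.text import BasicTextGenerationFormatParameters
from horde_sdk.generation_parameters.text.object_models import (
    BasicTextGenerationParametersTemplate,
    TextGenerationParametersTemplate,
)

template = TextGenerationParametersTemplate(
    base_params=BasicTextGenerationParametersTemplate(
        model="example-llm",
        prompt="Once upon a time",
        max_length=80,
        temperature=0.7,
    ),
    format_params=BasicTextGenerationFormatParameters(frmttriminc=True, singleline=False),
)
assert template.get_number_expected_results() == 1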
+ """ + return 1 + + def to_parameters( + self, + *, + base_param_updates: BasicTextGenerationParametersTemplate | None = None, + result_ids: Sequence[ID_TYPES] | None = None, + allocator: ResultIdAllocator | None = None, + seed: str = "text", + ) -> TextGenerationParameters: + """Convert this template into concrete text generation parameters.""" + base_params = self.base_params + if base_params is None: + raise ValueError("Text generation templates must define base_params before conversion.") + + overrides: dict[str, object] | None = None + if base_param_updates: + overrides = { + "base_params": base_params.model_copy(update=base_param_updates.model_dump(exclude_none=True)), + } + + def _inject_base_params_into_fingerprint( + finalization: TemplateFinalization[Self], + fingerprint_payload: dict[str, object], + ) -> None: + fingerprint_base_params = finalization.template.base_params + if fingerprint_base_params is None: + raise ValueError("Text generation templates must define base_params before conversion.") + fingerprint_payload["base_params"] = fingerprint_base_params.model_dump(exclude_none=False) + + finalization = finalize_template_for_parameters( + self, + overrides=overrides, + exclude_none=False, + fingerprint_exclude_fields=("result_ids",), + fingerprint_transform=_inject_base_params_into_fingerprint, + ) + + finalized_template = finalization.template + resolved_base_params = finalized_template.base_params + if resolved_base_params is None: + raise ValueError("Text generation templates must define base_params before conversion.") + + resolved_result_ids = resolve_result_ids_from_payload( + explicit_ids=result_ids, + payload_value=finalization.payload.get("result_ids"), + count=1, + allocator=allocator, + seed=seed, + fingerprint=finalization.fingerprint, + ) + + concrete_base_params = BasicTextGenerationParameters.model_validate( + resolved_base_params, + from_attributes=True, + ) + + parameter_payload = finalized_template.model_copy( + update={ + "base_params": concrete_base_params, + "result_ids": resolved_result_ids, + }, + ) + + return TextGenerationParameters.model_validate( + parameter_payload, + from_attributes=True, + ) + + +class TextGenerationParameters(TextGenerationParametersTemplate): + """Represents the common bare-minium parameters for a text generation.""" + + result_ids: list[ID_TYPES] + """The generation IDs to assign to the resulting discrete generations.""" + + base_params: BasicTextGenerationParameters + """The basic text generation parameters for the generation.""" + + +class KoboldAITextGenerationParameters(TextGenerationParameters): + """Represents koboldAI text generation parameters.""" + + use_default_bad_words_ids: bool | None = None + """When True, uses the default KoboldAI bad word IDs.""" diff --git a/horde_sdk/generation_parameters/utils.py b/horde_sdk/generation_parameters/utils.py new file mode 100644 index 00000000..a810e8f7 --- /dev/null +++ b/horde_sdk/generation_parameters/utils.py @@ -0,0 +1,232 @@ +from __future__ import annotations + +import base64 +import hashlib +import json +import uuid +from collections.abc import Callable, Collection, Mapping, Sequence +from dataclasses import dataclass +from enum import Enum +from pathlib import Path +from typing import Final, TypeVar +from uuid import UUID + +from pydantic import BaseModel + +from horde_sdk.consts import ID_TYPES + +_DEFAULT_RESULT_ID_NAMESPACE: Final[uuid.UUID] = uuid.uuid5( + uuid.NAMESPACE_URL, + "https://github.com/Haidra-Org/horde-sdk", +) + + +TemplateT = TypeVar("TemplateT", 
bound=BaseModel) + + +@dataclass(frozen=True) +class TemplateFingerprintSnapshot[TemplateT: BaseModel]: + """Snapshot containing the finalized template payload and its fingerprint.""" + + template: TemplateT + payload: dict[str, object] + fingerprint: str + + +@dataclass(frozen=True) +class TemplateFinalization[TemplateT: BaseModel]: + """Snapshot produced when applying overrides to a template.""" + + template: TemplateT + payload: dict[str, object] + + +def apply_template_overrides[TemplateT: BaseModel]( + template: TemplateT, + *, + overrides: Mapping[str, object] | None = None, + exclude_none: bool = False, +) -> TemplateFinalization[TemplateT]: + """Return a template copy with overrides applied alongside its payload snapshot.""" + updated = template if overrides is None or not overrides else template.model_copy(update=dict(overrides)) + + payload = updated.model_dump(exclude_none=exclude_none) + return TemplateFinalization(template=updated, payload=payload) + + +def finalize_template_for_parameters( + template: TemplateT, + *, + overrides: Mapping[str, object] | None = None, + exclude_none: bool = False, + fingerprint_exclude_fields: Collection[str] | None = None, + fingerprint_transform: Callable[[TemplateFinalization[TemplateT], dict[str, object]], None] | None = None, +) -> TemplateFingerprintSnapshot[TemplateT]: + """Finalize a template payload and compute a deterministic fingerprint.""" + finalization = apply_template_overrides( + template, + overrides=overrides, + exclude_none=exclude_none, + ) + + fingerprint_payload = dict(finalization.payload) + if fingerprint_exclude_fields: + for field in fingerprint_exclude_fields: + fingerprint_payload.pop(field, None) + + if fingerprint_transform is not None: + fingerprint_transform(finalization, fingerprint_payload) + + fingerprint = compute_parameter_fingerprint(fingerprint_payload) + return TemplateFingerprintSnapshot( + template=finalization.template, + payload=finalization.payload, + fingerprint=fingerprint, + ) + + +def _normalise_for_fingerprint(value: object) -> object: + """Convert values into JSON-serialisable structures with deterministic ordering.""" + if value is None: + return None + if isinstance(value, bool | int | float | str): + return value + if isinstance(value, Enum): + enum_value = value.value + if isinstance(enum_value, bool | int | float | str): + return enum_value + return value.name + if isinstance(value, UUID | Path): + return str(value) + if isinstance(value, bytes): + encoded = base64.b64encode(value).decode("ascii") + return {"__type__": "bytes", "base64": encoded} + if isinstance(value, Mapping): + return { + str(key): _normalise_for_fingerprint(value_item) + for key, value_item in sorted(value.items(), key=lambda item: str(item[0])) + } + if isinstance(value, Sequence) and not isinstance(value, str | bytes | bytearray): + return [_normalise_for_fingerprint(item) for item in value] + raise TypeError(f"Unsupported value type for fingerprinting: {type(value)!r}") + + +def compute_parameter_fingerprint(payload: Mapping[str, object]) -> str: + """Produce a stable fingerprint for a parameter payload.""" + normalised = _normalise_for_fingerprint(payload) + serialised = json.dumps(normalised, sort_keys=True, separators=(",", ":")) + digest = hashlib.sha256(serialised.encode("utf-8")) + return digest.hexdigest() + + +class ResultIdAllocator: + """Utility that produces deterministic identifiers for generation results.""" + + _namespace: uuid.UUID + + def __init__(self, namespace: uuid.UUID | None = None) -> None: + 
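# Illustrative check of the fingerprint helper defined above: key order does not
# matter because mappings are sorted during normalisation before hashing.
fingerprint_a = compute_parameter_fingerprint({"prompt": "hello", "temperature": 0.7})
fingerprint_b = compute_parameter_fingerprint({"temperature": 0.7, "prompt": "hello"})
assert fingerprint_a == fingerprint_b  # identical SHA-256 hex digests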
"""Create an allocator bound to the provided namespace.""" + self._namespace = namespace or _DEFAULT_RESULT_ID_NAMESPACE + + def allocate(self, *, seed: str, index: int, fingerprint: str | None = None) -> str: + """Derive a deterministic identifier from the seed, fingerprint, and positional index.""" + payload = f"{seed}:{fingerprint}:{index}" if fingerprint is not None else f"{seed}:{index}" + return str(uuid.uuid5(self._namespace, payload)) + + +def ensure_result_ids( + existing: Sequence[ID_TYPES] | None, + count: int, + *, + allocator: ResultIdAllocator | None, + seed: str, + fingerprint: str | None = None, +) -> list[ID_TYPES]: + """Return a concrete list of result identifiers, allocating when needed.""" + if existing is not None: + identifiers = list(existing) + if len(identifiers) != count: + raise ValueError(f"Expected {count} result identifiers but received {len(identifiers)}.") + return identifiers + + if allocator is None: + return [str(uuid.uuid4()) for _ in range(count)] + + return [allocator.allocate(seed=seed, index=index, fingerprint=fingerprint) for index in range(count)] + + +def ensure_result_id( + existing: ID_TYPES | None, + *, + allocator: ResultIdAllocator | None, + seed: str, + fingerprint: str | None = None, +) -> ID_TYPES: + """Return a concrete result identifier, allocating when missing.""" + if existing is not None: + return existing + + if allocator is None: + return str(uuid.uuid4()) + + return allocator.allocate(seed=seed, index=0, fingerprint=fingerprint) + + +def resolve_result_ids_from_payload( + *, + explicit_ids: Sequence[ID_TYPES] | None, + payload_value: object, + count: int, + allocator: ResultIdAllocator | None, + seed: str, + fingerprint: str, +) -> list[ID_TYPES]: + """Resolve a concrete list of result identifiers from explicit, payload, or allocated sources.""" + payload_ids = _normalise_result_id_sequence(payload_value) + return ensure_result_ids( + explicit_ids or payload_ids, + count, + allocator=allocator, + seed=seed, + fingerprint=fingerprint, + ) + + +def resolve_result_id_from_payload( + *, + explicit_id: ID_TYPES | None, + payload_value: object, + allocator: ResultIdAllocator | None, + seed: str, + fingerprint: str, +) -> ID_TYPES: + """Resolve a single result identifier from explicit, payload, or allocated sources.""" + payload_identifier = _normalise_result_id(payload_value) + return ensure_result_id( + explicit_id or payload_identifier, + allocator=allocator, + seed=seed, + fingerprint=fingerprint, + ) + + +def _normalise_result_id(value: object) -> ID_TYPES | None: + if value is None: + return None + if isinstance(value, str | UUID): + return value + raise TypeError("result_id entries must be strings or UUIDs.") + + +def _normalise_result_id_sequence(value: object) -> list[ID_TYPES] | None: + if value is None or isinstance(value, str | bytes | bytearray): + return None + if not isinstance(value, Sequence): + return None + + resolved: list[ID_TYPES] = [] + for entry in value: + if not isinstance(entry, str | UUID): + raise TypeError("result_ids entries must be strings or UUIDs.") + resolved.append(entry) + return resolved diff --git a/horde_sdk/generation_parameters/versioning.py b/horde_sdk/generation_parameters/versioning.py new file mode 100644 index 00000000..ee55d602 --- /dev/null +++ b/horde_sdk/generation_parameters/versioning.py @@ -0,0 +1,110 @@ +"""Schema version migration utilities for generation parameter payloads.""" + +from __future__ import annotations + +from collections.abc import Callable, Mapping + +from 
horde_sdk.generation_parameters.generic import CompositeParametersBase + +ParameterSchemaMigrationFunc = Callable[[dict[str, object]], dict[str, object]] + + +class ParameterSchemaMigrationError(RuntimeError): + """Raised when a parameter payload cannot be migrated to the current schema.""" + + +class _ParameterSchemaMigrationRegistry: + """Registry that tracks schema migration steps per parameter type.""" + + def __init__(self) -> None: + self._entries: dict[type[CompositeParametersBase], dict[str, tuple[str, ParameterSchemaMigrationFunc]]] = {} + + def register( + self, + parameter_type: type[CompositeParametersBase], + *, + from_version: str, + to_version: str, + migration: ParameterSchemaMigrationFunc, + ) -> None: + """Register a migration step for ``parameter_type`` from ``from_version`` to ``to_version``.""" + migration_map = self._entries.setdefault(parameter_type, {}) + if from_version in migration_map: + raise ValueError( + f"Migration from version '{from_version}' is already registered for {parameter_type.__qualname__}.", + ) + migration_map[from_version] = (to_version, migration) + + def apply( + self, + parameter_type: type[CompositeParametersBase], + payload: Mapping[str, object], + ) -> dict[str, object]: + """Apply registered migrations to ``payload`` so it matches the current schema version.""" + payload_dict = dict(payload) + + schema_value = payload_dict.get("schema_version") + if isinstance(schema_value, str) and schema_value: + current_version = schema_value + else: + current_version = parameter_type.legacy_schema_version() + + payload_dict["schema_version"] = current_version + target_version = parameter_type.current_schema_version() + + if current_version == target_version: + payload_dict["schema_version"] = target_version + return payload_dict + + migrations = self._entries.get(parameter_type, {}) + visited_versions: set[str] = set() + updated_payload = payload_dict + + while current_version != target_version: + entry = migrations.get(current_version) + if entry is None: + raise ParameterSchemaMigrationError( + "No migration path from version " + f"'{current_version}' to '{target_version}' for {parameter_type.__qualname__}.", + ) + + next_version, migration = entry + updated_payload = migration(dict(updated_payload)) + updated_payload["schema_version"] = next_version + + if next_version in visited_versions: + raise ParameterSchemaMigrationError( + f"Detected migration cycle when upgrading {parameter_type.__qualname__}.", + ) + + visited_versions.add(next_version) + current_version = next_version + + return updated_payload + + +_PARAMETER_SCHEMA_MIGRATIONS = _ParameterSchemaMigrationRegistry() + + +def register_parameter_schema_migration( + parameter_type: type[CompositeParametersBase], + *, + from_version: str, + to_version: str, + migration: ParameterSchemaMigrationFunc, +) -> None: + """Register a migration step that upgrades ``parameter_type`` payloads.""" + _PARAMETER_SCHEMA_MIGRATIONS.register( + parameter_type, + from_version=from_version, + to_version=to_version, + migration=migration, + ) + + +def apply_parameter_schema_migrations( + parameter_type: type[CompositeParametersBase], + payload: Mapping[str, object], +) -> dict[str, object]: + """Upgrade ``payload`` to match ``parameter_type``'s current schema version.""" + return _PARAMETER_SCHEMA_MIGRATIONS.apply(parameter_type, payload) diff --git a/horde_sdk/generation_parameters/video/__init__.py b/horde_sdk/generation_parameters/video/__init__.py new file mode 100644 index 00000000..bf89ff51 --- /dev/null 
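# Hypothetical sketch of registering and applying a one-step schema migration.
# ExampleParameters stands in for any CompositeParametersBase subclass whose
# legacy_schema_version() returns "1" and current_schema_version() returns "2";
# the renamed field is likewise made up for illustration.
def _rename_max_tokens(payload: dict[str, object]) -> dict[str, object]:
    payload["max_length"] = payload.pop("max_tokens", None)
    return payload

register_parameter_schema_migration(
    ExampleParameters,
    from_version="1",
    to_version="2",
    migration=_rename_max_tokens,
)

upgraded = apply_parameter_schema_migrations(ExampleParameters, {"max_tokens": 128})
assert upgraded["schema_version"] == "2"
assert upgraded["max_length"] == 128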
+++ b/horde_sdk/generation_parameters/video/__init__.py @@ -0,0 +1 @@ +"""Video generation parameters object models and helpers.""" diff --git a/horde_sdk/generic_api/apimodels.py b/horde_sdk/generic_api/apimodels.py index 5fafa630..25c58705 100644 --- a/horde_sdk/generic_api/apimodels.py +++ b/horde_sdk/generic_api/apimodels.py @@ -4,20 +4,21 @@ import abc import base64 -import os +import time import uuid -from typing import Any, TypeVar +from typing import Any, TypeVar, override import aiohttp from loguru import logger -from pydantic import BaseModel, ConfigDict, Field, RootModel, field_validator -from typing_extensions import override +from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, RootModel, field_validator from horde_sdk import _default_sslcontext -from horde_sdk.consts import HTTPMethod, HTTPStatusCode +from horde_sdk.consts import HTTPMethod, HTTPStatusCode, get_default_frozen_model_config_dict from horde_sdk.generic_api.consts import ANON_API_KEY +from horde_sdk.generic_api.decoration import Unequatable, Unhashable from horde_sdk.generic_api.endpoints import GENERIC_API_ENDPOINT_SUBPATH, url_with_path from horde_sdk.generic_api.metadata import GenericAcceptTypes +from horde_sdk.utils import default_bridge_agent_string try: from horde_sdk._version import __version__ @@ -84,19 +85,7 @@ def log_safe_model_dump(self, extra_exclude: set[str] | None = None) -> dict[Any class HordeAPIObjectBaseModel(HordeAPIObject, BaseModel): """Base class for all Horde API data models (leveraging pydantic).""" - model_config = ( - ConfigDict( - frozen=True, - use_attribute_docstrings=True, - extra="allow", - ) - if not os.getenv("TESTS_ONGOING") - else ConfigDict( - frozen=True, - use_attribute_docstrings=True, - extra="forbid", - ) - ) + model_config = get_default_frozen_model_config_dict() class HordeAPIData(BaseModel): @@ -107,19 +96,7 @@ class HordeAPIData(BaseModel): models. 
""" - model_config = ( - ConfigDict( - frozen=True, - use_attribute_docstrings=True, - extra="allow", - ) - if not os.getenv("TESTS_ONGOING") - else ConfigDict( - frozen=True, - use_attribute_docstrings=True, - extra="forbid", - ) - ) + model_config = get_default_frozen_model_config_dict() class HordeAPIMessage(HordeAPIObject): @@ -129,6 +106,13 @@ class HordeAPIMessage(HordeAPIObject): class HordeResponse(HordeAPIMessage): """Represents any response from any Horde API.""" + _time_constructed: float = PrivateAttr(default_factory=lambda: time.time()) + + @property + def time_constructed(self) -> float: + """The time the model was constructed (in epoch time).""" + return self._time_constructed + T = TypeVar("T") @@ -141,41 +125,23 @@ class HordeResponseRootModel(RootModel[T], HordeResponse): For example: ```python - class MyDataModel(HordeResponseRootModel[MyData]): + class MyDataRootModel(HordeResponseRootModel[MyData]): pass ``` """ - model_config = ( - ConfigDict( - frozen=True, - use_attribute_docstrings=True, - ) - if not os.getenv("TESTS_ONGOING") - else ConfigDict( - frozen=True, - use_attribute_docstrings=True, - ) + model_config = ConfigDict( + frozen=True, + use_attribute_docstrings=True, + # `extra` is not allowed with RootModel ) class HordeResponseBaseModel(HordeResponse, BaseModel): """Base class for all Horde API response data models (leveraging pydantic).""" - model_config = ( - ConfigDict( - frozen=True, - use_attribute_docstrings=True, - extra="allow", - ) - if not os.getenv("TESTS_ONGOING") - else ConfigDict( - frozen=True, - use_attribute_docstrings=True, - extra="forbid", - ) - ) + model_config = get_default_frozen_model_config_dict() HordeResponseTypes = HordeResponseRootModel[Any] | HordeResponseBaseModel @@ -186,11 +152,11 @@ class ResponseRequiringFollowUpMixin(abc.ABC): """Represents any response from any Horde API which requires a follow up request of some kind.""" @abc.abstractmethod - def get_follow_up_returned_params(self, *, as_python_field_name: bool = False) -> list[dict[str, object]]: + def get_follow_up_returned_params(self, *, as_python_field_name: bool = False) -> list[dict[str, Any]]: """Return the information required from this response to submit a follow up request. Note that this dict uses the alias field names (as seen on the API), not the python field names. - JobIDs will be returned as `{"id": "00000000-0000-0000-0000-000000000000"}` instead of + GenerationIDs will be returned as `{"id": "00000000-0000-0000-0000-000000000000"}` instead of `{"id_": "00000000-0000-0000-0000-000000000000"}`. Returns: @@ -374,6 +340,31 @@ class ContainsMessageResponseMixin(HordeAPIData): """A message from the API. This is typically an error or warning message, but may also be informational.""" +class RequestSingleWarning(HordeAPIObjectBaseModel): + """Represents a single warning from the API.""" + + message: str | None = None + """A message from the API. This is typically an error or warning message, but may also be informational.""" + + code: str | None = None + """The code associated with this warning.""" + + @override + @classmethod + def get_api_model_name(cls) -> str | None: + return "RequestSingleWarning" + + +@Unhashable +@Unequatable +class ContainsWarningsResponseMixin(HordeAPIData): + """Represents any response from any Horde API which contains warnings.""" + + warnings: list[RequestSingleWarning] | None = None + """A list of warnings from the API. 
This is typically an error or warning message, + but may also be informational.""" + + class RequestErrorResponse(HordeResponseBaseModel, ContainsMessageResponseMixin): """The catch all error response for any request to any Horde API. @@ -395,19 +386,7 @@ def get_api_model_name(cls) -> str | None: class HordeRequest(HordeAPIMessage, BaseModel): """Represents any request to any Horde API.""" - model_config = ( - ConfigDict( - frozen=True, - use_attribute_docstrings=True, - extra="allow", - ) - if not os.getenv("TESTS_ONGOING") - else ConfigDict( - frozen=True, - use_attribute_docstrings=True, - extra="forbid", - ) - ) + model_config = get_default_frozen_model_config_dict() @classmethod @abc.abstractmethod @@ -419,7 +398,7 @@ def get_http_method(cls) -> HTTPMethod: # X_Fields # TODO client_agent: str = Field( - default=f"horde_sdk:{__version__}:https://github.com/haidra-org/horde-sdk", + default=default_bridge_agent_string, alias="Client-Agent", ) """The requesting client's agent. You should set this to reflect the name, version and contact information @@ -531,7 +510,7 @@ def validate_api_key_length(cls, v: str) -> str: return v -class RequestSpecifiesUserIDMixin(HordeAPIData): +class MessageSpecifiesUserIDMixin(HordeAPIData): """Mix-in class to describe an endpoint for which you can specify a user.""" user_id: str @@ -561,27 +540,49 @@ class RequestUsesWorkerMixin(HordeAPIData): workers: list[str] = Field(default_factory=list) """A list of worker IDs to use for this request. If empty, any worker can pick up the request. Using this incurs and extra kudos cost.""" - worker_blacklist: list[str] = Field(default_factory=list) + worker_blacklist: bool = False """If true, the worker list will be treated as a blacklist instead of a whitelist.""" models: list[str] """The generative models to use for this request.""" + validated_backends: bool | None = Field(default=None) + """When true, only inference backends that are validated by the AI Horde devs will serve this request. When False, + non-validated backends will also be used which can increase speed but you may end up with unexpected results.""" + style: str | None = Field(default=None, examples=["00000000-0000-0000-0000-000000000000"]) + """The style ID to use for the generation.""" + dry_run: bool = False """If true, the request will not be processed, but will return a response with the estimated kudos cost.""" + webhook: str | None = Field(default=None) + """Provide a URL where the AI Horde will send a POST call after each delivered generation. + The request will include the details of the job as well as the request ID.""" + allow_downgrade: bool = False + """If true, the request can be modified to lesser parameters if the original request is too expensive.""" + proxied_account: str | None = Field(default=None) + """The account this request is being proxied for. This requires the sending API to be a service account.""" + disable_batching: bool | None = Field(False) + """When true, This request will not use batching. This will allow you to retrieve accurate seeds. 
+ Feature is restricted to Trusted users and Patreons.""" __all__ = [ "APIKeyAllowedInRequestMixin", + "ContainsMessageResponseMixin", + "ContainsWarningsResponseMixin", + "HordeAPIData", + "HordeAPIMessage", + "HordeAPIObject", + "HordeAPIObjectBaseModel", "HordeRequest", "HordeResponse", "HordeResponseBaseModel", "HordeResponseRootModel", - "ContainsMessageResponseMixin", - "HordeAPIObject", - "HordeAPIMessage", + "HordeResponseTypes", + "MessageSpecifiesUserIDMixin", "RequestErrorResponse", - "RequestSpecifiesUserIDMixin", + "RequestSingleWarning", "RequestUsesWorkerMixin", + "ResponseRequiringDownloadMixin", "ResponseRequiringFollowUpMixin", "ResponseWithProgressMixin", ] diff --git a/horde_sdk/generic_api/decoration.py b/horde_sdk/generic_api/decoration.py index fdeaf124..6493e866 100644 --- a/horde_sdk/generic_api/decoration.py +++ b/horde_sdk/generic_api/decoration.py @@ -3,7 +3,7 @@ T = TypeVar("T") -def Unhashable(cls: type[T]) -> type[T]: +def Unhashable[T](cls: type[T]) -> type[T]: """Make a class unhashable. Args: @@ -33,7 +33,7 @@ def is_unhashable(obj: type | Any) -> bool: # noqa: ANN401 return getattr(cls, "_unhashable", False) -def Unequatable(cls: type[T]) -> type[T]: +def Unequatable[T](cls: type[T]) -> type[T]: """Mark a class as unequatable. Args: diff --git a/horde_sdk/generic_api/generic_clients.py b/horde_sdk/generic_api/generic_clients.py index 37cff536..f4efd8b2 100644 --- a/horde_sdk/generic_api/generic_clients.py +++ b/horde_sdk/generic_api/generic_clients.py @@ -4,20 +4,27 @@ import asyncio import os +import time from abc import ABC from ssl import SSLContext -from typing import Any, TypeVar +from typing import Any, TypeVar, override import aiohttp +import logfire import requests from loguru import logger -from pydantic import BaseModel, ValidationError +from pydantic import BaseModel, Field, ValidationError from strenum import StrEnum -from typing_extensions import override from horde_sdk import _default_sslcontext -from horde_sdk.ai_horde_api.exceptions import AIHordePayloadValidationError -from horde_sdk.consts import HTTPMethod +from horde_sdk._telemetry.metrics import ( + _telemetry_client_critical_errors_counter, + _telemetry_client_horde_api_errors_counter, + _telemetry_client_requests_finished_successfully_counter, + _telemetry_client_requests_started_counter, +) +from horde_sdk.consts import HTTPMethod, HTTPStatusCode +from horde_sdk.exceptions import PayloadValidationError from horde_sdk.generic_api.apimodels import ( APIKeyAllowedInRequestMixin, HordeRequest, @@ -63,6 +70,31 @@ class ParsedRawRequest(BaseModel): ) """TypeVar for the horde response type.""" +DEFAULT_RETRY_STATUS_CODES = { + HTTPStatusCode.TOO_MANY_REQUESTS, + HTTPStatusCode.SERVICE_UNAVAILABLE, + HTTPStatusCode.GATEWAY_TIMEOUT, + HTTPStatusCode.REQUEST_TIMEOUT, +} + + +class RetryConfiguration(BaseModel): + """Configuration for retrying requests using exponential backoff and jitter.""" + + max_retries: int = Field(default=3, lt=10, ge=0) + initial_delay_seconds: float = Field(default=0.5, gt=0) + max_delay_seconds: float = Field(default=30.0, gt=0) + backoff_factor: float = Field(default=2.0, gt=1) + jitter_factor: float = Field(default=0.1, gt=0, le=1) + retry_status_codes: set[HTTPStatusCode] = Field( + default=DEFAULT_RETRY_STATUS_CODES, + description="HTTP status codes that should trigger a retry", + ) + retry_on_connection_errors: bool = Field( + default=True, + description="Whether to retry on connection errors", + ) + class BaseHordeAPIClient(ABC): """An abstract class which is 
the base for all horde API clients.""" @@ -83,6 +115,13 @@ class BaseHordeAPIClient(ABC): _accept_types: type[GenericAcceptTypes] = GenericAcceptTypes """A list of all valid values for the header key 'accept'.""" + _msg_format_submit_request = ( + "submit_request {sync_async} {http_method_name} for {api_request_type} expecting {expected_response_type}" + ) + + _retry_by_default: bool = True + retry_config: RetryConfiguration + # endregion def __init__( @@ -94,7 +133,8 @@ def __init__( query_fields: type[GenericQueryFields] = GenericQueryFields, accept_types: type[GenericAcceptTypes] = GenericAcceptTypes, ssl_context: SSLContext = _default_sslcontext, - **kwargs: Any, # noqa: ANN401 + retry_config: RetryConfiguration | None = None, + **kwargs: Any, # noqa: ANN401 # FIXME ) -> None: """Initialize a new `GenericHordeAPIClient` instance. @@ -111,6 +151,8 @@ def __init__( Defaults to GenericAcceptTypes. ssl_context (SSLContext, optional): The SSL context to use for aiohttp requests. Defaults to using `certifi`. + retry_config (RetryConfiguration, optional): The retry configuration to use for requests. + Defaults to None, which will use the default retry configuration. kwargs: Any additional keyword arguments are ignored. Raises: @@ -144,6 +186,14 @@ def __init__( self._query_field_keys = query_fields self._accept_types = accept_types + if retry_config is None: + retry_config = RetryConfiguration() + + if not isinstance(retry_config, RetryConfiguration): + raise TypeError("`retry_config` must be of type `RetryConfiguration` or a subclass of it!") + + self.retry_config = retry_config + def _validate_and_prepare_request(self, api_request: HordeRequest) -> ParsedRawRequest: """Validate the given `api_request` and returns a `_ParsedRequest` instance with the data to be sent. @@ -154,7 +204,6 @@ def _validate_and_prepare_request(self, api_request: HordeRequest) -> ParsedRawR Args: api_request (HordeRequest): The `HordeRequest` instance to be validated and prepared. - expected_response_type (type[HordeResponse]): The expected response type. Returns: _ParsedRequest: A `_ParsedRequest` instance with the extracted data to be sent in the request. 
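# Illustrative sketch of the backoff schedule implied by the RetryConfiguration
# defaults above (0.5s initial delay, factor 2.0, capped at 30s); the client's
# retry logic additionally checks the status code and adds a small jitter term.
config = RetryConfiguration()
for attempt in range(config.max_retries):
    delay = min(
        config.initial_delay_seconds * (config.backoff_factor**attempt),
        config.max_delay_seconds,
    )
    print(f"retry {attempt + 1}: wait ~{delay:.1f}s")  # ~0.5s, ~1.0s, ~2.0s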
@@ -213,6 +262,9 @@ def get_specified_data_keys(data_keys: type[StrEnum], api_request: HordeRequest) if request_key in specified_headers: request_headers_dict[specified_headers[request_key]] = request_value continue + if request_key in specified_queries: + request_queries_dict[specified_queries[request_key]] = request_value + continue if request_key in extra_header_keys: # Remove any trailing underscores from the key as they are used to avoid python keyword conflicts api_name = request_key if not request_key.endswith("_") else request_key[:-1] @@ -220,10 +272,6 @@ def get_specified_data_keys(data_keys: type[StrEnum], api_request: HordeRequest) request_headers_dict[api_name] = request_value continue - if request_key in specified_queries: - request_queries_dict[specified_queries[request_key]] = request_value - continue - if request_key in extra_query_keys: # Remove any trailing underscores from the key as they are used to avoid python keyword conflicts api_name = request_key if not request_key.endswith("_") else request_key[:-1] @@ -251,13 +299,15 @@ def get_specified_data_keys(data_keys: type[StrEnum], api_request: HordeRequest) if not request_body_data_dict: # This is explicitly set to None for clarity that it is unspecified - # i.e., and empty body is not the same as an unspecified body + # i.e., an empty body is not the same as an unspecified body request_body_data_dict = None # Add the API key to the request headers if the request is authenticated and an API key is provided if isinstance(api_request, APIKeyAllowedInRequestMixin) and "apikey" not in request_headers_dict: request_headers_dict["apikey"] = self._apikey + _telemetry_client_requests_started_counter.add(1) + return ParsedRawRequest( endpoint_no_query=endpoint_url, request_headers=request_headers_dict, @@ -276,8 +326,9 @@ def _after_request_handling( # If requests response is a failure code, see if a `message` key exists in the response. # If so, return a RequestErrorResponse if returned_status_code >= 400: + _telemetry_client_horde_api_errors_counter.add(1) if "errors" in raw_response_json: - raise AIHordePayloadValidationError( + raise PayloadValidationError( raw_response_json.get("errors", ""), raw_response_json.get("message", ""), ) @@ -285,6 +336,7 @@ def _after_request_handling( try: return RequestErrorResponse(**raw_response_json) except ValidationError: + _telemetry_client_critical_errors_counter.add(1) return RequestErrorResponse( message="The API returned an error we didn't recognize! See `object_data` for the raw response.", rc=raw_response_json.get("rc", returned_status_code), @@ -301,6 +353,7 @@ def _after_request_handling( message="The response type doesn't match expected one! See `object_data` for the raw response.", object_data={"raw_response": raw_response_json}, ) + _telemetry_client_horde_api_errors_counter.add(1) except ValidationError as e: if not isinstance(handled_response, expected_response_type): # pragma: no cover error_response = RequestErrorResponse( @@ -309,8 +362,50 @@ def _after_request_handling( ) handled_response = error_response + _telemetry_client_critical_errors_counter.add(1) + + _telemetry_client_requests_finished_successfully_counter.add(1) + return handled_response + def should_retry( + self, + status_code: int, + current_error_count: int, + retry_after: float, + ) -> bool: + """Determine if a request should be retried based on the status code and retry configuration. + + Args: + status_code (int): The HTTP status code returned by the request. 
+ current_error_count (int): The current number of errors encountered. + retry_after (float): The time to wait before retrying the request. + + Returns: + bool: True if the request should be retried, False otherwise. + """ + if not self._retry_by_default: + return False + + if current_error_count >= self.retry_config.max_retries: + return False + + if status_code not in self.retry_config.retry_status_codes: + return False + + jitter = (self.retry_config.jitter_factor * retry_after) if self.retry_config.jitter_factor else 0 + + retry_delay = ( + min( + self.retry_config.initial_delay_seconds * (self.retry_config.backoff_factor**current_error_count), + self.retry_config.max_delay_seconds, + ) + + jitter + ) + + time.sleep(retry_delay) + return True + class GenericHordeAPIManualClient(BaseHordeAPIClient): """Interfaces with any flask API the horde provides, but provides little error handling. @@ -338,62 +433,57 @@ def submit_request( """ http_method_name = api_request.get_http_method() - parsed_request = self._validate_and_prepare_request(api_request) + if expected_response_type not in api_request.get_success_status_response_pairs().values(): + logger.warning( + "The expected response type is not in the list of success status response pairs! This may result in " + "unexpected behavior.", + ) + logger.warning(f"Passed expected_response_type: {expected_response_type}") + logger.warning(f"Allowable pairs defined in the SDK : {api_request.get_success_status_response_pairs()}") + + with logfire.span( + self._msg_format_submit_request.format( + sync_async="sync", + http_method_name=http_method_name, + api_request_type=type(api_request).__name__, + expected_response_type=expected_response_type.__name__, + ), + sync_async="sync", + http_method_name=http_method_name, + api_request_type=type(api_request).__name__, + expected_response_type=expected_response_type.__name__, + ): + parsed_request = self._validate_and_prepare_request(api_request) - raw_response: requests.Response | None = None + raw_response: requests.Response | None = None - if http_method_name == HTTPMethod.GET: - if parsed_request.request_body is not None: - raise RuntimeError( - "GET requests cannot have a body! This may mean you forgot to override `get_header_fields()` " - "or perhaps you may need to define a `metadata.py` module or entry in it for your API.", + if http_method_name == HTTPMethod.GET: + if parsed_request.request_body is not None: + raise RuntimeError( + "GET requests cannot have a body! 
This may mean you forgot to override `get_header_fields()` " + "or perhaps you may need to define a `metadata.py` module or entry in it for your API.", + ) + raw_response = requests.get( + parsed_request.endpoint_no_query, + headers=parsed_request.request_headers, + params=parsed_request.request_queries, + allow_redirects=True, + ) + else: + raw_response = requests.request( + method=http_method_name, + url=parsed_request.endpoint_no_query, + headers=parsed_request.request_headers, + params=parsed_request.request_queries, + json=parsed_request.request_body, + allow_redirects=True, ) - raw_response = requests.get( - parsed_request.endpoint_no_query, - headers=parsed_request.request_headers, - params=parsed_request.request_queries, - allow_redirects=True, - ) - elif http_method_name == HTTPMethod.POST: - raw_response = requests.post( - parsed_request.endpoint_no_query, - headers=parsed_request.request_headers, - params=parsed_request.request_queries, - json=parsed_request.request_body, - allow_redirects=True, - ) - elif http_method_name == HTTPMethod.PUT: - raw_response = requests.put( - parsed_request.endpoint_no_query, - headers=parsed_request.request_headers, - params=parsed_request.request_queries, - json=parsed_request.request_body, - allow_redirects=True, - ) - elif http_method_name == HTTPMethod.PATCH: - raw_response = requests.patch( - parsed_request.endpoint_no_query, - headers=parsed_request.request_headers, - params=parsed_request.request_queries, - json=parsed_request.request_body, - allow_redirects=True, - ) - elif http_method_name == HTTPMethod.DELETE: - raw_response = requests.delete( - parsed_request.endpoint_no_query, - headers=parsed_request.request_headers, - params=parsed_request.request_queries, - json=parsed_request.request_body, - allow_redirects=True, - ) - else: - raise RuntimeError(f"Unknown HTTP method: {http_method_name}") - return self._after_request_handling( - raw_response_json=raw_response.json(), - returned_status_code=raw_response.status_code, - expected_response_type=expected_response_type, - ) + return self._after_request_handling( + raw_response_json=raw_response.json(), + returned_status_code=raw_response.status_code, + expected_response_type=expected_response_type, + ) class GenericAsyncHordeAPIManualClient(BaseHordeAPIClient): @@ -404,7 +494,8 @@ class GenericAsyncHordeAPIManualClient(BaseHordeAPIClient): _aiohttp_session: aiohttp.ClientSession - def __init__( # noqa: D107 + @override + def __init__( self, *, apikey: str | None = None, @@ -414,7 +505,7 @@ def __init__( # noqa: D107 query_fields: type[GenericQueryFields] = GenericQueryFields, accept_types: type[GenericAcceptTypes] = GenericAcceptTypes, ssl_context: SSLContext = _default_sslcontext, - **kwargs: Any, # noqa: ANN401 + **kwargs: Any, ) -> None: super().__init__( apikey=apikey, @@ -422,6 +513,7 @@ def __init__( # noqa: D107 path_fields=path_fields, query_fields=query_fields, accept_types=accept_types, + ssl_context=ssl_context, **kwargs, ) self._aiohttp_session = aiohttp_session @@ -457,25 +549,37 @@ async def submit_request( if not self._aiohttp_session: raise RuntimeError("No aiohttp session was provided but an async method was called!") - async with ( - self._aiohttp_session.request( - http_method_name.value, - parsed_request.endpoint_no_query, - headers=parsed_request.request_headers, - params=parsed_request.request_queries, - json=parsed_request.request_body, - allow_redirects=True, - ssl=self._ssl_context, - ) as response, + with logfire.span( + self._msg_format_submit_request.format( + 
sync_async="async", + http_method_name=http_method_name, + api_request_type=type(api_request).__name__, + expected_response_type=expected_response_type.__name__, + ), + sync_async="async", + http_method_name=http_method_name, + api_request_type=type(api_request).__name__, + expected_response_type=expected_response_type.__name__, ): - raw_response_json = await response.json() - response_status = response.status - - return self._after_request_handling( - raw_response_json=raw_response_json, - returned_status_code=response_status, - expected_response_type=expected_response_type, - ) + async with ( + self._aiohttp_session.request( + http_method_name.value, + parsed_request.endpoint_no_query, + headers=parsed_request.request_headers, + params=parsed_request.request_queries, + json=parsed_request.request_body, + allow_redirects=True, + ssl=self._ssl_context, + ) as response, + ): + raw_response_json = await response.json() + response_status = response.status + + return self._after_request_handling( + raw_response_json=raw_response_json, + returned_status_code=response_status, + expected_response_type=expected_response_type, + ) class GenericHordeAPISession(GenericHordeAPIManualClient): @@ -512,7 +616,8 @@ def __init__( ) self._pending_follow_ups = [] - def submit_request( # noqa: D102 + @override + def submit_request( self, api_request: HordeRequest, expected_response_type: type[HordeResponseTypeVar], @@ -672,7 +777,8 @@ class GenericAsyncHordeAPISession(GenericAsyncHordeAPIManualClient): it.""" _pending_follow_ups_lock: asyncio.Lock = asyncio.Lock() - def __init__( # noqa: D107 + @override + def __init__( self, aiohttp_session: aiohttp.ClientSession, *, @@ -867,11 +973,11 @@ async def _handle_exit_async( # Log the results of each cleanup request. for i, cleanup_response in enumerate(cleanup_responses): if isinstance(cleanup_response, Exception): - logger.error(f"Recovery request {i+1} failed!") + logger.error(f"Recovery request {i + 1} failed!") - logger.info(f"Recovery request {i+1} submitted!") - logger.debug(f"Recovery request {i+1}: {cleanup_requests[i].log_safe_model_dump()}") - logger.debug(f"Recovery response {i+1}: {cleanup_response}") + logger.info(f"Recovery request {i + 1} submitted!") + logger.debug(f"Recovery request {i + 1}: {cleanup_requests[i].log_safe_model_dump()}") + logger.debug(f"Recovery response {i + 1}: {cleanup_response}") # Return True to indicate that all requests were handled successfully. return True diff --git a/horde_sdk/generic_api/metadata.py b/horde_sdk/generic_api/metadata.py index 6b0067b5..f549582a 100644 --- a/horde_sdk/generic_api/metadata.py +++ b/horde_sdk/generic_api/metadata.py @@ -15,6 +15,9 @@ class GenericHeaderFields(StrEnum): """`StrEnum` for data that may be passed in the header of a request. + Maps the python object's field name which matches to a comparable header. This implies that fields with these + names are *always* passed in the header of a request. + Pass this in to a GenericClient initializer if implementing a new API. See an existing API's `metadata.py` module. """ @@ -37,6 +40,9 @@ class GenericAcceptTypes(StrEnum): class GenericPathFields(StrEnum): """`StrEnum` for data that may be passed as part of a URL path (before the query string). + Used to replace path parameters like `/api/v1/{path_field}` with the value of the field. This implies that fields + with these names are *always* passed in the path of a request. + Pass this in to a GenericClient initializer if implementing a new API. See an existing API's `metadata.py` module. 
""" @@ -44,5 +50,8 @@ class GenericPathFields(StrEnum): class GenericQueryFields(StrEnum): """`StrEnum` for data that may be passed as part of a URL query (after the `?`). + Used to replace query parameters like `/api/v1/resource?query_field=value` with the value of the field. + This implies that fields with these names are *always* passed in the query of a request. + Pass this in to a GenericClient initializer if implementing a new API. See an existing API's `metadata.py` module. """ diff --git a/horde_sdk/generic_api/utils/swagger.py b/horde_sdk/generic_api/utils/swagger.py index 3dd865a8..75a54576 100644 --- a/horde_sdk/generic_api/utils/swagger.py +++ b/horde_sdk/generic_api/utils/swagger.py @@ -4,6 +4,7 @@ import json import re +import uuid from abc import ABC from pathlib import Path from typing import Any, ClassVar @@ -107,13 +108,13 @@ class SwaggerModelDefinitionSchemaValidation(SwaggerModelEntry): anyOf: list[SwaggerModelDefinition | SwaggerModelRef] | None = None """The model must match at least one of the schemas in this list.""" - @model_validator(mode="before") - def one_method_specified(cls, v: dict[Any, Any]) -> dict[Any, Any]: - """Ensure at least one of the validation methods is specified.""" - if not any([v.get("allOf"), v.get("oneOf"), v.get("anyOf")]): - raise ValueError("At least one of allOf, oneOf, or anyOf must be specified.") + @model_validator(mode="after") + def validate_schema_validation_methods(self) -> SwaggerModelDefinitionSchemaValidation: + """Ensure that only one of the validation methods is specified.""" + if sum([bool(self.allOf), bool(self.oneOf), bool(self.anyOf)]) > 1: + raise ValueError("Only one of allOf, oneOf, or anyOf can be specified.") - return v + return self def get_validation_method(self) -> SwaggerSchemaValidationMethod | None: """Get the schema validation method used for this model.""" @@ -609,6 +610,28 @@ def filename_from_endpoint_path( # Replace any consecutive underscores with a single underscore. return re.sub(r"__+", "_", endpoint_path) + def get_all_verbs_for_endpoint( + self, + endpoint_path: str, + ) -> list[HTTPMethod]: + """Get all HTTP methods defined for a given endpoint path. + + Args: + endpoint_path: The path of the endpoint. + + Returns: + A list of HTTPMethod objects representing the HTTP methods defined for the endpoint. + """ + # Get the SwaggerEndpoint object for the given endpoint path. + endpoint = self.paths.get(endpoint_path) + + # If the endpoint does not exist, return an empty list. + if not endpoint: + return [] + + # Get all defined HTTP methods for the endpoint and return them as a list of HTTPMethod objects. + return [HTTPMethod(http_method_name.upper()) for http_method_name in endpoint.get_defined_endpoints()] + def write_all_payload_examples_to_file(self, directory: str | Path) -> bool: """Write all example payloads to a file in the test_data directory. 
@@ -827,15 +850,25 @@ def _resolve_model_ref_defaults( continue # Otherwise, get the default value for the property and add it to the return dictionary - return_dict[prop_name] = self.get_default_with_constraint(prop) + return_dict[prop_name] = self.get_default_with_constraint(prop, prop_name) return return_dict if return_dict else return_list - def get_default_with_constraint(self, model_property: SwaggerModelProperty) -> object: + def get_default_with_constraint( + self, + model_property: SwaggerModelProperty, + prop_name: str | None = None, + ) -> object: """Get the example value, defaulting to a a value appropriate to the type with any constraints applied.""" # If the model property has a description that includes the word "optionally", do nothing - if model_property.description and "optionally" in model_property.description: - pass + # if model_property.description and "optionally" in model_property.description: + # pass + + if prop_name == "id" and ( + (model_property.description) + and ("uuid" in model_property.description.lower() or "sharedkey id" in model_property.description.lower()) + ): # FIXME + return str(uuid.UUID("00000000-0000-0000-0000-000000000000")) # If the model property has an example value, return it if model_property.example is not None: diff --git a/horde_sdk/horde_logging.py b/horde_sdk/horde_logging.py index 4b1a6206..e89dba00 100644 --- a/horde_sdk/horde_logging.py +++ b/horde_sdk/horde_logging.py @@ -5,7 +5,8 @@ from loguru import logger # FIXME? This is more of an indev thing. I'd like a less confusing default. -verbosity = 17 # By default we show anything more significant than a progress log +# verbosity = 17 # By default we show anything more significant than a progress log +verbosity = 20 error_levels = ["ERROR", "CRITICAL", "EXCEPTION", "TRACE"] @@ -15,28 +16,29 @@ def set_logger_verbosity(count: int) -> None: global verbosity # The count comes reversed. 
So count = 0 means minimum verbosity # While count 5 means maximum verbosity - # So the more count we have, the lowe we drop the versbosity maximum - verbosity = 20 - (count * 10) + verbosity = 50 - (count * 10) def is_stdout_log(record: dict[str, Any]) -> bool: """Filter for stdout logs levels.""" - return not record["level"].no < verbosity + global verbosity + return bool(record["level"].no >= verbosity) def is_msg_log(record: dict[str, Any]) -> bool: """Filter for stdout logs levels.""" - return not record["level"].no < verbosity + global verbosity + return bool(record["level"].no >= verbosity) def is_stderr_log(record: dict[str, Any]) -> bool: """Filter for stderr logs levels.""" - return not record["level"].name not in error_levels + return record["level"].name in error_levels def is_trace_log(record: dict[str, Any]) -> bool: """Filter for trace logs levels.""" - return not record["level"].name not in error_levels + return record["level"].name in error_levels handler_config = [ @@ -87,7 +89,47 @@ def is_trace_log(record: dict[str, Any]) -> bool: if parsed_verbosity is not None: verbosity = parsed_verbosity +HORDE_SDK_TELEMETRY = os.getenv("HORDE_SDK_TELEMETRY") + + +def set_default_log_handlers(with_telemetry: bool) -> None: + """Set the default log handlers if not already configured.""" + if with_telemetry: + import logfire + + import horde_sdk._version + + logfire.configure( + send_to_logfire=False, + service_name="horde-sdk", + service_version=horde_sdk._version.__version__, + console=False, + ) + + if HORDE_SDK_TELEMETRY and HORDE_SDK_TELEMETRY.lower() == "full": + logger.info("Enabling full telemetry instrumentation.") + logfire.instrument_aiohttp_client() + logfire.instrument_requests() + else: + logger.info("Enabling basic telemetry instrumentation.") + + logfire.instrument_pydantic() + + logger.remove() + handler_config.append(logfire.loguru_handler()) + logger.configure(handlers=handler_config) # type: ignore + logger.info("Telemetry instrumentation enabled.") + else: + logger.info("Setting default logger handlers.") + logger.remove() + logger.configure(handlers=handler_config) # type: ignore + logger.info("Default logger handlers set.") + + set_logger_handlers = os.getenv("HORDE_SDK_SET_DEFAULT_LOG_HANDLERS") -if set_logger_handlers: - logger.configure(handlers=handler_config) # type: ignore + +if HORDE_SDK_TELEMETRY is not None and HORDE_SDK_TELEMETRY != "" and HORDE_SDK_TELEMETRY != "0": + set_default_log_handlers(with_telemetry=True) +elif set_logger_handlers: + set_default_log_handlers(with_telemetry=False) diff --git a/horde_sdk/meta.py b/horde_sdk/meta.py index 3163f2fb..79413df5 100644 --- a/horde_sdk/meta.py +++ b/horde_sdk/meta.py @@ -4,17 +4,20 @@ import types from functools import cache +from loguru import logger + import horde_sdk import horde_sdk.ai_horde_api import horde_sdk.ai_horde_api.apimodels -import horde_sdk.ai_horde_worker import horde_sdk.generic_api import horde_sdk.generic_api.apimodels import horde_sdk.ratings_api import horde_sdk.ratings_api.apimodels +import horde_sdk.worker from horde_sdk import HordeAPIObject, HordeRequest from horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH, get_ai_horde_swagger_url -from horde_sdk.generic_api.apimodels import HordeAPIData +from horde_sdk.consts import HTTPMethod, HTTPStatusCode +from horde_sdk.generic_api.apimodels import HordeAPIData, HordeResponse from horde_sdk.generic_api.utils.swagger import SwaggerParser @@ -52,6 +55,47 @@ def find_subclasses(module_or_package: types.ModuleType, 
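# Illustrative: with verbosity = 50 - (count * 10), a count of 0 only lets
# CRITICAL (50) records through, while a count of 3 lowers the threshold to INFO (20).
import horde_sdk.horde_logging as horde_logging

horde_logging.set_logger_verbosity(3)
assert horde_logging.verbosity == 20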
super_type: type) -> li return subclasses +@cache +def find_subclasses_and_dependency_imports( + module_or_package: types.ModuleType, + super_type: type, +) -> list[tuple[type, list[str]]]: + """Find all subclasses of a given type in a module or package. + + Args: + module_or_package (types.ModuleType): The module or package to search in. + super_type (type): The super type of the classes to search for. + + Returns: + list[tuple[type, list[str]]]: A list of tuples of all the subclasses of the super type in the module or package + and all of the imported symbols in the module. + """ + subclasses: list[tuple[type, list[str]]] = [] + + if hasattr(module_or_package, "__package__") and module_or_package.__package__ is not None: + module_or_package = importlib.import_module(module_or_package.__package__) + + for _importer, modname, _ispkg in pkgutil.walk_packages( + path=module_or_package.__path__, + prefix=module_or_package.__name__ + ".", + onerror=lambda x: None, + ): + module = importlib.import_module(modname) + for name in dir(module): + obj = getattr(module, name) + if ( + isinstance(obj, type) + and issubclass(obj, super_type) + and obj is not super_type + and not inspect.isabstract(obj) + ): + if hasattr(module, "__all__"): + subclasses.append((obj, module.__all__)) + else: + pass + return subclasses + + def any_unimported_classes(module: types.ModuleType, super_type: type) -> tuple[bool, set[type]]: """Check if any classes in the module are not imported in the `__init__.py` of the apimodels namespace. @@ -170,6 +214,8 @@ def all_models_missing_docstrings() -> set[type]: all_classes = find_subclasses(horde_sdk.ai_horde_api.apimodels, HordeAPIObject) all_classes += find_subclasses(horde_sdk.ai_horde_api.apimodels, HordeAPIData) + all_classes = list(set(all_classes)) + missing_docstrings = set() for class_type in all_classes: @@ -186,6 +232,8 @@ def all_model_and_fields_missing_docstrings() -> dict[type, set[str]]: all_classes += find_subclasses(horde_sdk.ai_horde_api.apimodels, HordeAPIObject) all_classes += find_subclasses(horde_sdk.ai_horde_api.apimodels, HordeAPIData) + all_classes = list(set(all_classes)) + missing_docstrings: dict[type, set[str]] = {} from pydantic import BaseModel @@ -203,3 +251,304 @@ def all_model_and_fields_missing_docstrings() -> dict[type, set[str]]: missing_docstrings[class_type] = missing_fields return missing_docstrings + + +class FoundResponseInfo: + """A class to store information about a found response class (type).""" + + def __init__( + self, + *, + response: type[HordeResponse], + api_model_name: str | None, + parent_request: type[HordeRequest], + http_status_code: HTTPStatusCode, + endpoint: str, + http_method: HTTPMethod, + ) -> None: + """Initialize the FoundResponseInfo object. + + Args: + response (type[HordeResponse]): The response class. + api_model_name (str | None): The API model name + parent_request (type[HordeRequest]): The parent request class. + http_status_code (HTTPStatusCode): The HTTP status code. + endpoint (str): The endpoint. + http_method (HTTPMethod): The HTTP method. 
+ """ + self.response = response + self.api_model_name = api_model_name + self.parent_request = parent_request + self.http_status_code = http_status_code + self.endpoint = endpoint + self.http_method = http_method + + def __eq__(self, other: object) -> bool: + if not isinstance(other, FoundResponseInfo): + return False + + return ( + self.response is other.response + and self.api_model_name == other.api_model_name + and self.parent_request is other.parent_request + and self.http_status_code == other.http_status_code + and self.endpoint == other.endpoint + and self.http_method == other.http_method + ) + + def __hash__(self) -> int: + return hash((self.response, self.parent_request, self.http_status_code, self.endpoint, self.http_method)) + + +class FoundMixinInfo: + """A class to store information about a found mixin class (type).""" + + def __init__( + self, + *, + mixin: type, + api_model_name: str | None, + ) -> None: + """Initialize the FoundMixinInfo object. + + Args: + mixin (type): The mixin class. + api_model_name (str | None): The API model name + """ + self.mixin = mixin + self.api_model_name = api_model_name + + def __eq__(self, other: object) -> bool: + if not isinstance(other, FoundMixinInfo): + return False + + return self.mixin is other.mixin and self.api_model_name == other.api_model_name + + def __hash__(self) -> int: + return hash((self.mixin, self.api_model_name)) + + +def all_models_non_conforming_docstrings() -> dict[type, tuple[str | None, str | None]]: + """Return all of the models that do not have a v2 API model.""" + all_classes: list[type[HordeAPIObject] | type[HordeAPIData]] + all_classes = find_subclasses(horde_sdk.ai_horde_api.apimodels, HordeAPIObject) + + request_docstring_template = "Represents a {http_method} request to the {endpoint} endpoint." + response_docstring_template_single = ( + "Represents the data returned from the {endpoint} endpoint with http status code {http_status_code}." 
+ ) + endpoint_status_codes_pairs_template = ( + " - {endpoint} | {request_type} [{http_method}] -> {http_status_code}\n" + ) + response_docstring_template_multiple = ( + "Represents the data returned from the following endpoints and http " + "status codes:\n{endpoint_status_codes_pairs}" + ) + + v2_api_model_template = "\n\n v2 API Model: `{api_model}`" + + non_conforming_requests: dict[type, tuple[str | None, str | None]] = {} + non_conforming_responses: dict[type, tuple[str | None, str | None]] = {} + non_conforming_other: dict[type, tuple[str | None, str | None]] = {} + + request_response_map: dict[type[HordeRequest], list[FoundResponseInfo]] = {} + response_request_map: dict[type[HordeResponse], list[FoundResponseInfo]] = {} + + def process_request(class_type: type[HordeRequest]) -> None: + request_response_map[class_type] = [] + + http_method = class_type.get_http_method() + endpoint = class_type.get_api_endpoint_subpath() + + expected_request_docstring = request_docstring_template.format( + http_method=http_method, + endpoint=endpoint, + ) + + request_api_model_name = class_type.get_api_model_name() + + if request_api_model_name is not None: + expected_request_docstring += v2_api_model_template.format(api_model=request_api_model_name) + + expected_request_docstring = expected_request_docstring.replace("\n\n ", "\n\n") + + original_request_docstring = class_type.__doc__.rstrip() if class_type.__doc__ else "" + original_request_docstring = original_request_docstring.replace("\n\n ", "\n\n") + + if not class_type.__doc__ or not original_request_docstring.endswith(expected_request_docstring): + non_conforming_requests[class_type] = (original_request_docstring, expected_request_docstring) + + for response_status_code, response_type in sorted(class_type.get_success_status_response_pairs().items()): + if not issubclass(response_type, HordeResponse): + raise TypeError(f"Expected {response_type} to be a HordeResponse") + + found_response_info = FoundResponseInfo( + response=response_type, + api_model_name=response_type.get_api_model_name(), + parent_request=class_type, + http_status_code=response_status_code, + endpoint=endpoint, + http_method=http_method, + ) + + request_response_map[class_type].append(found_response_info) + + if response_type not in response_request_map: + response_request_map[response_type] = [] + + response_request_map[response_type].append(found_response_info) + + def process_response(response_request_infos: list[FoundResponseInfo]) -> None: + endpoint_status_codes_pairs = "" + + if len(response_request_infos) == 1: + response_request_info = response_request_infos[0] + + endpoint_status_codes_pairs += endpoint_status_codes_pairs_template.format( + endpoint=response_request_info.endpoint, + request_type=response_request_info.parent_request.__name__, + http_method=response_request_info.http_method, + http_status_code=response_request_info.http_status_code, + ) + + original_expected_response_docstring = response_docstring_template_single.format( + endpoint=response_request_info.endpoint, + http_status_code=response_request_info.http_status_code, + ) + + if response_request_info.api_model_name: + original_expected_response_docstring += v2_api_model_template.format( + api_model=response_request_info.api_model_name, + ) + + found_response_type = response_request_info.response + + response_doc_string = found_response_type.__doc__.rstrip() if found_response_type.__doc__ else "" + response_doc_string = response_doc_string.replace("\n\n ", "\n\n") + response_doc_string = 
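# Hypothetical illustration of the convention these templates encode: a request class for
# POST /v2/generate/async (both the path and the model name below are placeholders) would
# be expected to end its docstring with:
#
#     Represents a POST request to the /v2/generate/async endpoint.
#
#     v2 API Model: `GenerationInputStable`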
response_doc_string.replace("\n\n ", "\n\n") + + # If the expected docstring contains any runs of text which are greater than 119 characters... + + expected_exceeds_119 = False + + for line in original_expected_response_docstring.split("\n"): + if len(line) > 119: + expected_exceeds_119 = True + break + + matching_response_docstring = original_expected_response_docstring + # Due to the 119 ruff-enforced line limit in source files, + # we need to account for the fact that the expected docstring may require an + # unspecified (in the format string) line break. + if expected_exceeds_119: + matching_response_docstring = matching_response_docstring.replace("\n", "") + matching_response_docstring = matching_response_docstring.replace(" ", "") + response_doc_string = response_doc_string.replace("\n", "") + response_doc_string = response_doc_string.replace(" ", "") + logger.warning( + f"Docstring for {found_response_type} exceeds 119 characters in places. " + "Please manually verify the whitespace formatting as the testing script may not " + "accurately reflect the expected docstring formatting.", + ) + + matching_response_docstring = matching_response_docstring.replace("\n\n ", "\n\n") + + if not found_response_type.__doc__ or not response_doc_string.endswith( + matching_response_docstring, + ): + non_conforming_responses[found_response_type] = ( + response_doc_string, + matching_response_docstring, + ) + + else: + last_response_request_info: FoundResponseInfo | None = None + for response_request_info in response_request_infos: + if ( + last_response_request_info is not None + and last_response_request_info.endpoint == response_request_info.endpoint + and last_response_request_info.http_method == response_request_info.http_method + and last_response_request_info.http_status_code == response_request_info.http_status_code + and last_response_request_info.parent_request == response_request_info.parent_request + ): + continue + + last_response_request_info = response_request_info + endpoint_status_codes_pairs += endpoint_status_codes_pairs_template.format( + endpoint=response_request_info.endpoint, + request_type=response_request_info.parent_request.__name__, + http_method=response_request_info.http_method, + http_status_code=response_request_info.http_status_code, + ) + + original_expected_response_docstring = response_docstring_template_multiple.format( + endpoint_status_codes_pairs=endpoint_status_codes_pairs, + ) + + if response_request_info.api_model_name: + original_expected_response_docstring = original_expected_response_docstring.rstrip() + original_expected_response_docstring += v2_api_model_template.format( + api_model=response_request_info.api_model_name, + ) + + for response_request_info in response_request_infos: + found_response_type = response_request_info.response + + response_doc_string = found_response_type.__doc__.rstrip() if found_response_type.__doc__ else "" + response_doc_string = response_doc_string.replace(" ", " ") + response_doc_string = response_doc_string.replace(" ", " ") + response_doc_string = response_doc_string.replace("\n\n ", "\n\n") + response_doc_string = response_doc_string.replace("\n\n ", "\n\n") + + matching_response_docstring = original_expected_response_docstring.replace("\n\n ", "\n\n") + matching_response_docstring = matching_response_docstring.replace(" ", " ") + matching_response_docstring = matching_response_docstring.replace(" ", " ") + matching_response_docstring = matching_response_docstring.replace(" ", " ") + + if not found_response_type.__doc__ 
or not response_doc_string.endswith(matching_response_docstring): + non_conforming_responses[found_response_type] = ( + response_doc_string, + matching_response_docstring, + ) + + def process_other(class_type: type[HordeAPIObject]) -> None: + api_model_name = class_type.get_api_model_name() + + expected_suffix = v2_api_model_template.format(api_model=api_model_name) + + if not class_type.__doc__: + return + + original_other_docstring = class_type.__doc__.rstrip() + original_other_docstring = original_other_docstring.replace("\n\n ", "\n\n") + + expected_suffix_normalized = expected_suffix.replace("\n\n ", "\n\n") + if not original_other_docstring.endswith(expected_suffix_normalized): + # Extract the description portion (everything before any "v2 API Model:" line) + # to construct the full expected docstring + description_part = original_other_docstring + if "\n\nv2 API Model:" in original_other_docstring: + description_part = original_other_docstring.split("\n\nv2 API Model:")[0] + + full_expected_docstring = description_part + expected_suffix_normalized + non_conforming_other[class_type] = (original_other_docstring, full_expected_docstring) + + _sorted_all_classes = sorted(all_classes, key=lambda x: x.__name__) + _sorted_all_classes.reverse() + for class_type in _sorted_all_classes: + if issubclass(class_type, HordeResponse): + continue + + if issubclass(class_type, HordeRequest): + process_request(class_type) + elif issubclass(class_type, HordeAPIObject): + process_other(class_type) + + for _, response_request_infos in sorted(response_request_map.items(), key=lambda x: x[0].__name__): + process_response(response_request_infos) + + return { + **non_conforming_requests, + **non_conforming_responses, + **non_conforming_other, + } diff --git a/horde_sdk/ratings_api/apimodels.py b/horde_sdk/ratings_api/apimodels.py index 5e6e57e5..41ab2d8b 100644 --- a/horde_sdk/ratings_api/apimodels.py +++ b/horde_sdk/ratings_api/apimodels.py @@ -2,17 +2,17 @@ import uuid from enum import auto +from typing import override from pydantic import BaseModel, Field from strenum import StrEnum -from typing_extensions import override from horde_sdk.consts import _ANONYMOUS_MODEL, HTTPMethod from horde_sdk.generic_api.apimodels import ( APIKeyAllowedInRequestMixin, HordeRequest, HordeResponseBaseModel, - RequestSpecifiesUserIDMixin, + MessageSpecifiesUserIDMixin, ) from horde_sdk.ratings_api.endpoints import RATING_API_BASE_URL, RATING_API_ENDPOINT_SUBPATH @@ -206,7 +206,7 @@ class ImageRatingsFilterableRequestBase(BaseSelectableReturnTypeRequest): class UserValidateRequest( BaseRatingsAPIRequest, APIKeyAllowedInRequestMixin, - RequestSpecifiesUserIDMixin, + MessageSpecifiesUserIDMixin, ImageRatingsFilterableRequestBase, ): """Represents the data needed to make a request to the `/v1/user/validate/{user_id}` endpoint.""" @@ -235,7 +235,7 @@ def get_default_success_response_type(cls) -> type[UserValidateResponse]: class UserCheckRequest( BaseRatingsAPIRequest, APIKeyAllowedInRequestMixin, - RequestSpecifiesUserIDMixin, + MessageSpecifiesUserIDMixin, ): """Represents the data needed to make a request to the `/v1/user/check/` endpoint.""" diff --git a/horde_sdk/safety.py b/horde_sdk/safety.py new file mode 100644 index 00000000..8e789206 --- /dev/null +++ b/horde_sdk/safety.py @@ -0,0 +1,127 @@ +from pydantic import BaseModel + + +class SafetyResult(BaseModel): + """A model representing the result of a safety check.""" + + is_nsfw: bool + """Indicates if the content is NSFW (Not Safe For Work).""" + + nsfw_likelihood: 
float | None = None + """A float representing the likelihood that the content is NSFW, typically between 0.0 and 1.0.""" + + is_csam: bool | None = None + """Indicates if the content is CSAM.""" + + csam_likelihood: float | None = None + """A float representing the likelihood that the content is CSAM, typically between 0.0 and 1.0.""" + + is_hate_speech: bool | None = None + """Indicates if the content is hate speech.""" + + hate_speech_likelihood: float | None = None + """A float representing the likelihood that the content is hate speech, typically between 0.0 and 1.0.""" + + is_violent: bool | None = None + """Indicates if the content is violent.""" + + violent_likelihood: float | None = None + """A float representing the likelihood that the content is violent, typically between 0.0 and 1.0.""" + + is_self_harm: bool | None = None + """Indicates if the content promotes self-harm.""" + + self_harm_likelihood: float | None = None + """A float representing the likelihood that the content promotes self-harm, typically between 0.0 and 1.0.""" + + +class TextSafetyResult(SafetyResult): + """A model representing the result of a text safety check.""" + + +class ImageSafetyResult(SafetyResult): + """A model representing the result of an image safety check.""" + + is_csam: bool + """Indicates if the content is CSAM.""" + + +class SafetyRules: + """A class representing the rules for content safety checks.""" + + should_censor_nsfw: bool = True + """Indicates whether NSFW content should be censored.""" + + should_censor_hate_speech: bool = True + """Indicates whether hate speech content should be censored.""" + + should_censor_violent: bool = True + """Indicates whether violent content should be censored.""" + + should_censor_self_harm: bool = True + """Indicates whether self-harm content should be censored.""" + + def should_censor(self, safety_result: SafetyResult) -> bool: + """Determine if the content should be censored based on the safety result. + + Args: + safety_result (SafetyResult): The result of the safety check. + + Returns: + bool: True if the content should be censored, False otherwise. + """ + if safety_result.is_csam: + return True + + if safety_result.is_nsfw and self.should_censor_nsfw: + return True + + if safety_result.is_hate_speech and self.should_censor_hate_speech: + return True + + if safety_result.is_violent and self.should_censor_violent: + return True + + return bool(safety_result.is_self_harm and self.should_censor_self_harm) + + def __init__( + self, + should_censor_nsfw: bool = True, + should_censor_hate_speech: bool = True, + should_censor_violent: bool = True, + should_censor_self_harm: bool = True, + ) -> None: + """Initialize the SafetyRules with optional parameters to set censorship preferences. + + Args: + should_censor_nsfw (bool): Whether to censor NSFW content. Defaults to True. + should_censor_hate_speech (bool): Whether to censor hate speech content. Defaults to True. + should_censor_violent (bool): Whether to censor violent content. Defaults to True. + should_censor_self_harm (bool): Whether to censor self-harm content. Defaults to True. 
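+
+        Example (illustrative only; assumes the `SafetyResult` model defined above):
+            rules = SafetyRules(should_censor_nsfw=False)
+            rules.should_censor(SafetyResult(is_nsfw=True))  # False; NSFW-only content is allowed through
+            rules.should_censor(SafetyResult(is_nsfw=True, is_violent=True))  # True; violence is still censored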
+ """ + self.should_censor_nsfw = should_censor_nsfw + self.should_censor_hate_speech = should_censor_hate_speech + self.should_censor_violent = should_censor_violent + self.should_censor_self_harm = should_censor_self_harm + + +default_safety_rules = SafetyRules( + should_censor_nsfw=True, + should_censor_hate_speech=True, + should_censor_violent=True, + should_censor_self_harm=True, +) + +default_image_safety_rules = SafetyRules( + should_censor_nsfw=True, + should_censor_hate_speech=True, + should_censor_violent=True, + should_censor_self_harm=True, +) + +default_text_safety_rules = SafetyRules( + should_censor_nsfw=True, + should_censor_hate_speech=True, + should_censor_violent=True, + should_censor_self_harm=True, +) diff --git a/horde_sdk/scripts/write_all_response_examples_for_tests.py b/horde_sdk/scripts/write_all_response_examples_for_tests.py index 0640a24b..8bc9b4d8 100644 --- a/horde_sdk/scripts/write_all_response_examples_for_tests.py +++ b/horde_sdk/scripts/write_all_response_examples_for_tests.py @@ -20,25 +20,72 @@ def write_all_example_responses(*, test_data_path: Path | None = None) -> None: ) ai_horde_swagger_doc.write_all_response_examples_to_file(test_data_path) + files_to_make_arrays = [ + "_v2_users_get_200.json", + ] # Compatibility hacks: - # `_v2_users_get_200.json` needs to have the object added to an array and overwritten - with open(test_data_path / "_v2_users_get_200.json") as f: - _v2_users_get_200 = f.read() - - if not _v2_users_get_200.startswith("["): - logger.warning( - "The _v2_users_get_200.json file is not an array, converting it to one to make it compatible with the " - "tests. This is a compatibility hack due to the API docs not being correct.", - ) - _v2_users_get_200 = f"[{_v2_users_get_200}]" - _v2_users_get_200 = json.loads(_v2_users_get_200) - - with open(test_data_path / "_v2_users_get_200.json", "w") as f: - json.dump(_v2_users_get_200, f, indent=4) - f.write("\n") - else: - logger.info("The _v2_users_get_200.json file is already compatible with the tests.") - logger.info("This script should be updated to remove this compatibility hack.") + + for file_name in files_to_make_arrays: + if not (test_data_path / file_name).exists(): + logger.warning(f"File {file_name} does not exist, skipping compatibility hack.") + continue + + file_contents: str | None = None + with open(test_data_path / file_name) as f: + file_contents = f.read() + + if file_contents is None: + logger.warning(f"File {file_name} is empty, skipping compatibility hack.") + continue + + if not file_contents.startswith("["): + logger.warning( + f"The {file_name} file is not an array, converting it to one to make it compatible with the tests. 
" + f"This is a compatibility hack due to the API docs not being correct.", + ) + file_contents = f"[{file_contents}]" + file_contents = json.loads(file_contents) + + with open(test_data_path / file_name, "w") as f: + json.dump(file_contents, f, indent=4) + f.write("\n") + else: + logger.info(f"The {file_name} file is already compatible with the tests.") + logger.info("This script should be updated to remove this compatibility hack.") + + files_to_make_objects = [ + "_v2_filters_filter_id_get_200.json", + ] + + # Compatibility hacks: + for file_name in files_to_make_objects: + if not (test_data_path / file_name).exists(): + logger.warning(f"File {file_name} does not exist, skipping compatibility hack.") + continue + + file_contents_object: str | None = None + with open(test_data_path / file_name) as f: + file_contents_object = f.read() + + if file_contents_object is None: + logger.warning(f"File {file_name} is empty, skipping compatibility hack.") + continue + + if file_contents_object.startswith("["): + logger.warning( + f"The {file_name} file is an array, converting it to an object to make it compatible with the tests. " + f"This is a compatibility hack due to the API docs not being correct.", + ) + file_contents_object = file_contents_object.strip().strip("[]").strip() + file_contents_object = json.loads(file_contents_object) + + with open(test_data_path / file_name, "w") as f: + json.dump(file_contents_object, f, indent=4) + f.write("\n") + + else: + logger.info(f"The {file_name} file is already compatible with the tests.") + logger.info("This script should be updated to remove this compatibility hack.") if __name__ == "__main__": diff --git a/horde_sdk/utils.py b/horde_sdk/utils.py deleted file mode 100644 index c0de6ff8..00000000 --- a/horde_sdk/utils.py +++ /dev/null @@ -1,21 +0,0 @@ -"""AI-Horde specific utility functions.""" - -import random - - -def seed_to_int(s: int | str | None = None) -> int: - """Convert a seed to an int. If s is None or an empty string, a random int is returned.""" - if isinstance(s, int): - return s - if s is None or s == "": - # return a random int - return random.randint(0, (2**32) - 1) - - # Convert the seed to an int - n = abs(int(s) if s.isdigit() else int.from_bytes(s.encode(), "little")) - - # Keep the seed in the range [0, 2**32) - while n >= 2**32: - n = n >> 32 - # logger.debug(f"Seed {s} is too large, using {n} instead") - return n diff --git a/horde_sdk/utils/__init__.py b/horde_sdk/utils/__init__.py new file mode 100644 index 00000000..50b702f7 --- /dev/null +++ b/horde_sdk/utils/__init__.py @@ -0,0 +1,44 @@ +"""Contains utility functions and classes for the Horde SDK.""" + +import random + +import horde_sdk +import horde_sdk._version +from horde_sdk.consts import horde_sdk_github_url + + +def seed_to_int(s: int | str | None = None) -> int: + """Convert a seed to an int. If s is None or an empty string, a random int is returned.""" + if isinstance(s, int): + return s + + if s is None or s == "": + # return a random int + return random.randint(0, (2**32) - 1) + + # Convert the seed to an int + n = abs(int(s) if s.isdigit() else int.from_bytes(s.encode(), "little")) + + # Ensure the int is within the range of a 32-bit unsigned int + return n % (2**32) if n > (2**32) - 1 else n + + +def create_bridge_agent_string(client_name: str, client_version: str, client_contact_or_url: str) -> str: + """Create a bridge agent string. + + Args: + client_name (str): The name of the client. + client_version (str): The version of the client. 
+ client_contact_or_url (str): The contact information or URL for the client. + + Returns: + str: The formatted bridge agent string. + """ + return f"{client_name}:{client_version}:{client_contact_or_url}" + + +default_bridge_agent_string = create_bridge_agent_string( + client_name=horde_sdk.__name__, + client_version=horde_sdk._version.__version__, + client_contact_or_url=horde_sdk_github_url, +) diff --git a/horde_sdk/utils/image_utils.py b/horde_sdk/utils/image_utils.py new file mode 100644 index 00000000..3c83f1dd --- /dev/null +++ b/horde_sdk/utils/image_utils.py @@ -0,0 +1,349 @@ +import base64 +import io + +import PIL.Image +from horde_model_reference.meta_consts import ( + KNOWN_IMAGE_GENERATION_BASELINE, +) +from loguru import logger + +IMAGE_CHUNK_SIZE = 64 +"""The chunk size used for image processing. Images must be divisible by this value. + +Note that, at the time of writing, 64 is the de-facto standard value for all image models. +""" +DEFAULT_IMAGE_MIN_RESOLUTION = 512 +"""The default minimum resolution of the shortest dimension to use for the first pass.""" +DEFAULT_HIGHER_RES_MAX_RESOLUTION = 1024 +"""The default maximum resolution of the shortest dimension to use for the second pass.""" + +IDEAL_SDXL_RESOLUTIONS = [ + (1024, 1024), + (1152, 896), + (896, 1152), + (1216, 832), + (832, 1216), + (1344, 768), + (768, 1344), + (1536, 640), + (640, 1536), +] +"""The Stability.AI recommended resolutions for SDXL generation.""" + +IDEAL_SDXL_RESOLUTIONS_ASPECT_RATIOS = [width / height for width, height in IDEAL_SDXL_RESOLUTIONS] +"""The aspect ratios of the Stability.AI recommended resolutions for SDXL generation.""" + +MIN_DENOISING_STRENGTH = 0.01 +"""The minimum denoising strength to use for the upscale sampler""" +MAX_DENOISING_STRENGTH = 1.0 +"""The maximum denoising strength to use for the upscale sampler""" +DECAY_RATE = 2 +"""The rate at which the upscale steps decay based on the denoising strength""" +MIN_STEPS = 3 +"""The minimum number of steps to use for the upscaling sampler""" +UPSCALE_ADJUSTMENT_FACTOR = 0.5 +"""The factor by which the upscale steps are adjusted based on the native resolution distance factor""" +UPSCALE_DIVISOR = 2.25 +"""The divisor used to adjust the upscale steps based on the native resolution distance factor""" + +STANDARD_RESOLUTION = 512 +"""The standard resolution used for the resolution penalty calculation""" +RESOLUTION_PENALTY_MULTIPLIER = 3 +"""The multiplier used for the resolution penalty calculation""" + +STEP_FLOOR_THRESHOLD = 18 +"""The threshold at which the upscale steps are adjusted to the ddim steps""" + + +def resize_image_dimensions( + width: int, + height: int, + desired_dimension: int, + use_min: bool, + *, + image_chunk_size: int = IMAGE_CHUNK_SIZE, +) -> tuple[int, int]: + """Resize the image dimensions to have one side equal to the desired resolution, keeping the aspect ratio. + + - If use_min is True, the side with the minimum length will be resized to the desired resolution. + - For example, if the image is 1024x2048 and the desired resolution is 512, the image will be + resized to 512x1024. (As desired for 512x trained models) + - If use_min is False, the side with the maximum length will be resized to the desired resolution. + - For example, if the image is 1024x2048 and the desired resolution is 1024, the image will be + resized to 512x1024. (As desired for 1024x trained models) + - If the image is smaller than the desired resolution, the image will not be resized. + + Args: + width (int): The width of the image. 
+ height (int): The height of the image. + desired_dimension (int): The desired single side resolution. + use_min (bool): Whether to use the minimum or maximum side. + image_chunk_size (int): The chunk size used for image processing. Images must be divisible by this value. \ + Defaults to 64, which is the de-facto standard value for all image models at the time of writing. \ + This only should be changed if you are certain that the model you are using requires a different value. + + Returns: + tuple[int, int]: The target first pass width and height of the image. + """ + if desired_dimension is None or desired_dimension <= 0: + raise ValueError("desired_resolution must be a positive integer.") + + if width <= 0 or height <= 0: + raise ValueError("width and height must be positive integers.") + + if width < desired_dimension and height < desired_dimension: + return width, height + + if use_min: + ratio = min( + height / desired_dimension, + width / desired_dimension, + ) + else: + ratio = max( + height / desired_dimension, + width / desired_dimension, + ) + + new_width = int(width // (ratio * image_chunk_size)) * image_chunk_size + new_height = int(height // (ratio * image_chunk_size)) * image_chunk_size + + return new_width, new_height + + +def get_first_pass_image_resolution_min( + width: int, + height: int, + min_dimension: int = DEFAULT_IMAGE_MIN_RESOLUTION, +) -> tuple[int, int]: + """Resize the image dimensions to have one side equal to the desired resolution, keeping the aspect ratio. + + - If the image is larger than the desired resolution, the side with the minimum length will be resized to the + desired resolution. + - If the image is smaller than the desired resolution, the image will not be resized. + + """ + if width > min_dimension and height > min_dimension: + return resize_image_dimensions( + width, + height, + desired_dimension=min_dimension, + use_min=True, + ) + return width, height + + +def get_first_pass_image_resolution_max( + width: int, + height: int, + max_dimension: int = DEFAULT_HIGHER_RES_MAX_RESOLUTION, +) -> tuple[int, int]: + """Resize the image dimensions to have one side equal to the desired resolution, keeping the aspect ratio. + + - If the image is larger than the desired resolution, the side with the maximum length will be resized to the + desired resolution. + - If the image is smaller than the desired resolution, the image will not be resized. 
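+
+    For example, with the default maximum dimension of 1024 (illustrative values): a 2048x1024 image
+    would be resized to 1024x512, while a 768x512 image would be returned unchanged.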
+ """ + if max(width, height) > max_dimension: + return resize_image_dimensions( + width, + height, + desired_dimension=max_dimension, + use_min=False, + ) + return width, height + + +def get_first_pass_image_resolution_sdxl( + width: int, + height: int, +) -> tuple[int, int]: + """Resize the image to fit the SDXL resolution bucket which most closely matches the aspect ratio.""" + aspect_ratio = width / height + closest_aspect_ratio = min( + IDEAL_SDXL_RESOLUTIONS_ASPECT_RATIOS, + key=lambda x: abs(aspect_ratio - x), + ) + + index = IDEAL_SDXL_RESOLUTIONS_ASPECT_RATIOS.index(closest_aspect_ratio) + return IDEAL_SDXL_RESOLUTIONS[index] + + +def get_first_pass_image_resolution_by_baseline( + width: int, + height: int, + baseline: KNOWN_IMAGE_GENERATION_BASELINE | None, +) -> tuple[int, int]: + """Get the first pass image resolution based on the baseline category.""" + if baseline == KNOWN_IMAGE_GENERATION_BASELINE.stable_cascade: + return get_first_pass_image_resolution_max(width, height) + if baseline == KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_xl: + return get_first_pass_image_resolution_sdxl(width, height) + + return get_first_pass_image_resolution_min(width, height) + + +def calc_upscale_sampler_steps( + model_native_resolution: int | None, + width: int, + height: int, + hires_fix_denoising_strength: float, + ddim_steps: int, + *, + resolution_penalty_multiplier: float = RESOLUTION_PENALTY_MULTIPLIER, + standard_resolution: int = STANDARD_RESOLUTION, + min_denoising_strength: float = MIN_DENOISING_STRENGTH, + max_denoising_strength: float = MAX_DENOISING_STRENGTH, + min_steps: int = MIN_STEPS, + decay_rate: int = DECAY_RATE, + upscale_adjustment_factor: float = UPSCALE_ADJUSTMENT_FACTOR, + upscale_divisor: float = UPSCALE_DIVISOR, + step_floor_threshold: int = STEP_FLOOR_THRESHOLD, +) -> int: + """Calculate the number of upscale steps to use for the upscale sampler based on the input parameters. + + Note: The resulting values are non-linear to the input values. The heuristic is based on the native resolution + of the model, the requested resolution, the denoising strength and the number of steps used for the ddim + sampler. + + Practically speaking, the resulting number of steps should be roughly the number of step required + for most models to converge to a good result. Generally, doing more second pass steps than the value + returned by this function is wasted effort. + + Args: + model_native_resolution (int): The native resolution of the model to use for the generation. \ + This is the single side resolution (e.g. 512 for a 512x512 model). \ + Note that if this is unspecified (None), the upscale steps will not be adjusted based on the native \ + which will lead to suboptimal results, especially for models which work best at high resolutions. + width (int): The width of the image to generate. + height (int): The height of the image to generate. + hires_fix_denoising_strength (float): The denoising strength to use for the upscale sampler. + ddim_steps (int): The number of steps used for the sampler. + resolution_penalty_multiplier (float): The multiplier used for the resolution penalty calculation. + standard_resolution (int): The standard resolution used for the resolution penalty calculation. + min_denoising_strength (float): The minimum denoising strength to use for the upscale sampler. + max_denoising_strength (float): The maximum denoising strength to use for the upscale sampler. + min_steps (int): The minimum number of steps to use for the upscaling sampler. 
+ decay_rate (int): The rate at which the upscale steps decay based on the denoising strength. + upscale_adjustment_factor (float): The factor by which the upscale steps are adjusted based on the native \ + resolution distance factor. + upscale_divisor (float): The divisor used to adjust the upscale steps based on the native resolution \ + distance factor. + step_floor_threshold (int): The threshold at which the upscale steps are adjusted to the ddim steps. + + Returns: + int: The number of upscale steps to use for the upscale sampler. + """ + native_resolution_distance_factor: float = 0 + + if model_native_resolution is not None: + native_resolution_pixels = model_native_resolution * model_native_resolution + + requested_pixels = width * height + native_resolution_distance_factor = requested_pixels / native_resolution_pixels + + resolution_penalty = resolution_penalty_multiplier * (standard_resolution / model_native_resolution) + native_resolution_distance_factor /= resolution_penalty + + hires_fix_denoising_strength = max( + min_denoising_strength, + min(max_denoising_strength, hires_fix_denoising_strength), + ) + + scale = ddim_steps - min_steps + upscale_steps = round(min_steps + scale * (hires_fix_denoising_strength**decay_rate)) + + # if native_resolution_distance_factor > NATIVE_RESOLUTION_THRESHOLD: + upscale_steps = round( + upscale_steps * ((1 / (upscale_adjustment_factor**native_resolution_distance_factor)) / upscale_divisor), + ) + + logger.trace(f"Upscale steps calculated as {upscale_steps}") + + if ddim_steps <= step_floor_threshold: + logger.debug(f"Upscale steps increased by {min_steps} due to low requested ddim steps") + upscale_steps += min_steps + + if upscale_steps > ddim_steps: + logger.debug(f"Upscale steps adjusted to {ddim_steps} from {upscale_steps}") + upscale_steps = ddim_steps + + step_floor = min(6, ddim_steps) + if step_floor > upscale_steps: + logger.debug(f"Upscale steps adjusted to {step_floor} from {upscale_steps}") + upscale_steps = step_floor + + return upscale_steps + + +def base64_str_to_pil_image( + base64_str: str, + *, + except_on_parse_fail: bool = False, +) -> PIL.Image.Image | None: + """Convert a base64 string to a PIL image. + + Args: + base64_str (str): The base64 string to convert to a PIL image. + except_on_parse_fail (bool): Whether to raise an exception if the base64 string cannot be parsed. \ + Defaults to False. + + Returns: + PIL.Image.Image: The PIL image. + """ + try: + image_bytes = base64.b64decode(base64_str) + return PIL.Image.open(io.BytesIO(image_bytes)) + except Exception as e: + if except_on_parse_fail: + raise e + logger.error(f"({type(e)}) Failed to parse base64 image: {e}") + return None + + +def bytes_to_pil_image( + image_bytes: bytes, + *, + except_on_parse_fail: bool = False, +) -> PIL.Image.Image | None: + """Convert bytes to a PIL image. + + Args: + image_bytes (bytes): The bytes to convert to a PIL image. + except_on_parse_fail (bool): Whether to raise an exception if the bytes cannot be parsed. \ + Defaults to False. + + Returns: + PIL.Image.Image: The PIL image. + """ + try: + return PIL.Image.open(io.BytesIO(image_bytes)) + except Exception as e: + if except_on_parse_fail: + raise e + logger.error(f"({type(e)}) Failed to parse image bytes: {e}") + return None + + +def base64_str_to_bytes( + base64_str: str, + *, + except_on_parse_fail: bool = False, +) -> bytes | None: + """Convert a base64 string to bytes. + + Args: + base64_str (str): The base64 string to convert to bytes. 
+ except_on_parse_fail (bool): Whether to raise an exception if the base64 string cannot be parsed. \ + Defaults to False. + + Returns: + bytes: The bytes. + """ + try: + return base64.b64decode(base64_str) + except Exception as e: + if except_on_parse_fail: + raise e + logger.error(f"({type(e)}) Failed to parse base64 image: {e}") + return None diff --git a/horde_sdk/worker/__init__.py b/horde_sdk/worker/__init__.py new file mode 100644 index 00000000..daadd961 --- /dev/null +++ b/horde_sdk/worker/__init__.py @@ -0,0 +1,27 @@ +"""Helper methods for creating a worker for the AI Horde.""" + +from horde_sdk.worker.generations import AlchemySingleGeneration, ImageSingleGeneration, TextSingleGeneration +from horde_sdk.worker.generations_base import HordeSingleGeneration + +KnownGenerationType = ImageSingleGeneration | AlchemySingleGeneration | TextSingleGeneration +"""All of the possible generation types.""" + +from horde_sdk.worker.job_base import HordeWorkerJob, HordeWorkerJobConfig, SingleGenerationTypeVar +from horde_sdk.worker.jobs import AlchemyWorkerJob, ImageWorkerJob, TextWorkerJob + +KnownWorkerJobType = ImageWorkerJob | AlchemyWorkerJob | TextWorkerJob + +__all__ = [ + "AlchemySingleGeneration", + "AlchemyWorkerJob", + "HordeSingleGeneration", + "HordeWorkerJob", + "HordeWorkerJobConfig", + "ImageSingleGeneration", + "ImageWorkerJob", + "KnownGenerationType", + "KnownWorkerJobType", + "SingleGenerationTypeVar", + "TextSingleGeneration", + "TextWorkerJob", +] diff --git a/horde_sdk/worker/consts.py b/horde_sdk/worker/consts.py new file mode 100644 index 00000000..c62641de --- /dev/null +++ b/horde_sdk/worker/consts.py @@ -0,0 +1,530 @@ +from __future__ import annotations + +from collections.abc import Iterable, Mapping +from copy import deepcopy +from enum import auto +from typing import ClassVar + +from loguru import logger +from strenum import StrEnum + + +class WORKER_ERRORS(StrEnum): + """The reason a job faulted.""" + + UNHANDLED_EXCEPTION = auto() + """An error not otherwise specified occurred.""" + UNHANDLED_EXCEPTION_FROM_BACKEND = auto() + """An error was caught originating from within the backend.""" + SYSTEM_OUT_OF_MEMORY = auto() + """The system ran out of memory.""" + GPU_OUT_OF_MEMORY = auto() + """The GPU ran out of memory.""" + NETWORK_ISSUE = auto() + """There was a network issue, such as a timeout or a connection error.""" + SAFEGUARD_TIMEOUT = auto() + """A reasonable time limit was exceeded, such as a model taking too long to load or generate.""" + + +class GENERATION_PROGRESS(StrEnum): + """The state of a generation.""" + + NOT_STARTED = auto() + """The generation has not started.""" + ERROR = auto() + """An error occurred during generation. The most recent step will be retried up to a certain number of times.""" + PRELOADING = auto() + """The generation is preloading any models to RAM/VRAM. Preloading is skipped if the models are already loaded.""" + PRELOADING_COMPLETE = auto() + """The generation has completed preloading.""" + GENERATING = auto() + """The generation is in progress. 
This will also preload if that step did not yet occur.""" + GENERATION_COMPLETE = auto() + """The generation has completed generating the data, but may still need post-processing or safety checks.""" + PENDING_POST_PROCESSING = auto() + """The generation has completed and is pending post-processing.""" + POST_PROCESSING = auto() + """The generation is post-processing the generated data.""" + POST_PROCESSING_COMPLETE = auto() + """The generation has completed post-processing and is pending safety check.""" + PENDING_SAFETY_CHECK = auto() + """The generation was created and is pending safety check.""" + SAFETY_CHECKING = auto() + """The generation is being safety checked.""" + SAFETY_CHECK_COMPLETE = auto() + """The generation has completed safety check and is pending submission.""" + PENDING_SUBMIT = auto() + """The generation has completed safety check and is pending submission.""" + SUBMITTING = auto() + """The generation is pending submission.""" + SUBMIT_COMPLETE = auto() + """The generation has been successfully submitted.""" + COMPLETE = auto() + """The generation is completely finished and no further steps are required.""" + ABORTED = auto() + """The generation has failed because one or more steps failed too many times. An attempt to notify the API will + be made.""" + REPORTED_FAILED = auto() + """The generation has been reported as failed to the API.""" + USER_REQUESTED_ABORT = auto() + """The generation was aborted by the submitting user's request.""" + USER_ABORT_COMPLETE = auto() + """The generation was aborted (by user's request) and the API has been notified accordingly.""" + ABANDONED = auto() + """The generation failed and the API could not be notified. It has simply been discarded. + + Note that this can lead to a worker being put into maintenance mode if too many generations are abandoned + within a certain time frame. 
+ """ + + @staticmethod + def is_state_complete(progress: GENERATION_PROGRESS) -> bool: + """Check if the generation is complete.""" + return progress in { + GENERATION_PROGRESS.COMPLETE, + GENERATION_PROGRESS.ABORTED, + GENERATION_PROGRESS.REPORTED_FAILED, + GENERATION_PROGRESS.USER_ABORT_COMPLETE, + GENERATION_PROGRESS.ABANDONED, + } + + @staticmethod + def is_state_failing(progress: GENERATION_PROGRESS) -> bool: + """Check if the generation is failing.""" + return progress in { + GENERATION_PROGRESS.ERROR, + GENERATION_PROGRESS.ABORTED, + GENERATION_PROGRESS.REPORTED_FAILED, + GENERATION_PROGRESS.USER_REQUESTED_ABORT, + GENERATION_PROGRESS.ABANDONED, + } + + @staticmethod + def is_state_pending(progress: GENERATION_PROGRESS) -> bool: + """Check if the generation is pending.""" + return progress in { + GENERATION_PROGRESS.NOT_STARTED, + GENERATION_PROGRESS.PRELOADING_COMPLETE, + GENERATION_PROGRESS.PENDING_POST_PROCESSING, + GENERATION_PROGRESS.PENDING_SAFETY_CHECK, + GENERATION_PROGRESS.PENDING_SUBMIT, + } + + +initial_generation_state = GENERATION_PROGRESS.NOT_STARTED + +base_generate_progress_transitions: dict[GENERATION_PROGRESS, list[GENERATION_PROGRESS]] = { + GENERATION_PROGRESS.NOT_STARTED: [ + GENERATION_PROGRESS.PRELOADING, + GENERATION_PROGRESS.GENERATING, + GENERATION_PROGRESS.PENDING_POST_PROCESSING, + GENERATION_PROGRESS.POST_PROCESSING, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.PRELOADING: [ + GENERATION_PROGRESS.PRELOADING_COMPLETE, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.PRELOADING_COMPLETE: [ + GENERATION_PROGRESS.GENERATING, + GENERATION_PROGRESS.PENDING_POST_PROCESSING, + GENERATION_PROGRESS.POST_PROCESSING, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.GENERATING: [ + GENERATION_PROGRESS.PENDING_POST_PROCESSING, + GENERATION_PROGRESS.POST_PROCESSING, + GENERATION_PROGRESS.PENDING_SAFETY_CHECK, + GENERATION_PROGRESS.SAFETY_CHECKING, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.PENDING_POST_PROCESSING: [ + GENERATION_PROGRESS.POST_PROCESSING, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.POST_PROCESSING: [ + GENERATION_PROGRESS.PENDING_SAFETY_CHECK, + GENERATION_PROGRESS.SAFETY_CHECKING, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.PENDING_SAFETY_CHECK: [ + GENERATION_PROGRESS.SAFETY_CHECKING, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.SAFETY_CHECKING: [ + GENERATION_PROGRESS.PENDING_SUBMIT, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.PENDING_SUBMIT: [ + GENERATION_PROGRESS.SUBMITTING, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.SUBMITTING: [ + GENERATION_PROGRESS.SUBMIT_COMPLETE, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.SUBMIT_COMPLETE: [GENERATION_PROGRESS.COMPLETE], + GENERATION_PROGRESS.COMPLETE: [], + GENERATION_PROGRESS.ABORTED: [ + GENERATION_PROGRESS.REPORTED_FAILED, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.REPORTED_FAILED: [], + GENERATION_PROGRESS.ERROR: [GENERATION_PROGRESS.ABORTED], + GENERATION_PROGRESS.USER_REQUESTED_ABORT: [ + GENERATION_PROGRESS.USER_ABORT_COMPLETE, + ], + GENERATION_PROGRESS.USER_ABORT_COMPLETE: [], + GENERATION_PROGRESS.ABANDONED: [], +} +"""A map of the typical transitions between generation states.""" + +# "black box" generations are connected to backends which have no internal observability. These transitions are +# therefore limited to starting, erroring and completing. 
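+# For orientation, one valid "happy path" through the map below is (a sketch derived from the dict itself):
+#   NOT_STARTED -> GENERATING -> PENDING_SAFETY_CHECK -> SAFETY_CHECKING -> PENDING_SUBMIT
+#   -> SUBMITTING -> SUBMIT_COMPLETE -> COMPLETE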
+black_box_generate_progress_transitions: dict[GENERATION_PROGRESS, list[GENERATION_PROGRESS]] = { + GENERATION_PROGRESS.NOT_STARTED: [ + GENERATION_PROGRESS.GENERATING, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.GENERATING: [ + GENERATION_PROGRESS.PENDING_SUBMIT, + GENERATION_PROGRESS.PENDING_SAFETY_CHECK, + GENERATION_PROGRESS.COMPLETE, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.PENDING_SAFETY_CHECK: [ + GENERATION_PROGRESS.SAFETY_CHECKING, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.SAFETY_CHECKING: [ + GENERATION_PROGRESS.PENDING_SUBMIT, + GENERATION_PROGRESS.COMPLETE, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.PENDING_SUBMIT: [ + GENERATION_PROGRESS.SUBMITTING, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.SUBMITTING: [ + GENERATION_PROGRESS.SUBMIT_COMPLETE, + GENERATION_PROGRESS.COMPLETE, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.SUBMIT_COMPLETE: [GENERATION_PROGRESS.COMPLETE], + GENERATION_PROGRESS.COMPLETE: [], + GENERATION_PROGRESS.ABORTED: [ + GENERATION_PROGRESS.REPORTED_FAILED, + GENERATION_PROGRESS.ERROR, + ], + GENERATION_PROGRESS.REPORTED_FAILED: [], + GENERATION_PROGRESS.ERROR: [GENERATION_PROGRESS.ABORTED], + GENERATION_PROGRESS.USER_REQUESTED_ABORT: [ + GENERATION_PROGRESS.USER_ABORT_COMPLETE, + GENERATION_PROGRESS.ABANDONED, + ], + GENERATION_PROGRESS.USER_ABORT_COMPLETE: [], + GENERATION_PROGRESS.ABANDONED: [], +} + + +def generate_transitions( + base_transitions: dict[GENERATION_PROGRESS, list[GENERATION_PROGRESS]], + skip_submit: bool = False, + can_skip_safety_checks: bool = False, + supports_safety_only: bool = False, +) -> dict[GENERATION_PROGRESS, list[GENERATION_PROGRESS]]: + """Generate transitions with optional modifications. + + Args: + base_transitions (dict[GENERATION_PROGRESS, list[GENERATION_PROGRESS]]): The base transitions to modify. + skip_submit (bool, optional): Whether to skip submit transitions. Defaults to False. + can_skip_safety_checks (bool, optional): Whether to skip safety check transitions can be skipped. + Defaults to False. + supports_safety_only (bool, optional): Whether to allow transition directly to safety check + without going through generation or post-processing. Defaults to False. 
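+
+    Returns:
+        dict[GENERATION_PROGRESS, list[GENERATION_PROGRESS]]: A deep copy of `base_transitions` with the
+            requested modifications applied.
+
+    For example, `generate_transitions(base_generate_progress_transitions, skip_submit=True)` is intended to
+    drop the PENDING_SUBMIT, SUBMITTING and SUBMIT_COMPLETE states and replace transitions targeting them
+    with COMPLETE.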
+ """ + transitions = deepcopy(base_transitions) + + if can_skip_safety_checks: + for state in [ + GENERATION_PROGRESS.GENERATING, + GENERATION_PROGRESS.POST_PROCESSING, + ]: + transitions[state].append( + GENERATION_PROGRESS.PENDING_SUBMIT, + ) + + if skip_submit: + for state in [ + GENERATION_PROGRESS.PENDING_SUBMIT, + GENERATION_PROGRESS.SUBMITTING, + GENERATION_PROGRESS.SUBMIT_COMPLETE, + ]: + transitions.pop(state, None) + + for transition_values in transitions.values(): + for transition_value in transition_values: + found_transition_to_remove = False + if transition_value in [ + GENERATION_PROGRESS.PENDING_SUBMIT, + GENERATION_PROGRESS.SUBMITTING, + GENERATION_PROGRESS.SUBMIT_COMPLETE, + ]: + transition_values.remove(transition_value) + found_transition_to_remove = True + + if found_transition_to_remove: + transition_values.append( + GENERATION_PROGRESS.COMPLETE, + ) + if supports_safety_only: + for state in [ + GENERATION_PROGRESS.NOT_STARTED, + GENERATION_PROGRESS.PRELOADING_COMPLETE, + ]: + if state in transitions: + transitions[state].append(GENERATION_PROGRESS.PENDING_SAFETY_CHECK) + + return transitions + + +base_generate_progress_no_submit_transitions = generate_transitions( + base_generate_progress_transitions, + skip_submit=True, +) + +default_image_generate_progress_transitions = generate_transitions( + base_generate_progress_transitions, +) +default_image_generate_progress_no_submit_transitions = generate_transitions( + base_generate_progress_transitions, + skip_submit=True, +) + +default_alchemy_generate_progress_transitions = generate_transitions( + base_generate_progress_transitions, + can_skip_safety_checks=True, +) +default_alchemy_generate_progress_no_submit_transitions = generate_transitions( + base_generate_progress_transitions, + skip_submit=True, + can_skip_safety_checks=True, +) + +default_text_generate_progress_transitions = generate_transitions( + base_generate_progress_transitions, + can_skip_safety_checks=True, +) +default_text_generate_progress_no_submit_transitions = generate_transitions( + base_generate_progress_transitions, + skip_submit=True, + can_skip_safety_checks=True, +) + + +# Finalized generation states +finalized_generation_states = { + GENERATION_PROGRESS.SUBMIT_COMPLETE, + GENERATION_PROGRESS.REPORTED_FAILED, + GENERATION_PROGRESS.USER_ABORT_COMPLETE, + GENERATION_PROGRESS.ABANDONED, +} + + +def validate_generation_progress_transitions( + progress_transitions: Mapping[GENERATION_PROGRESS, Iterable[GENERATION_PROGRESS]], +) -> bool: + """Validate the generation progress transitions. + + Args: + progress_transitions (Mapping[GENERATION_PROGRESS, Iterable[GENERATION_PROGRESS]]): + The transitions to validate + + Returns: + bool: True if the transitions are valid, False otherwise. 
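+
+    Example (sketch):
+        validate_generation_progress_transitions(base_generate_progress_transitions)  # expected to return True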
+ """ + parent_states: set[GENERATION_PROGRESS] = set(progress_transitions.keys()) + target_states: set[GENERATION_PROGRESS] = set() + for transitions in progress_transitions.values(): + target_states.update(transitions) + + if not target_states.issubset(parent_states): + logger.error( + f"Invalid generation progress transitions: " + f"target states {target_states - parent_states} are not in parent states {parent_states}", + ) + return False + + if not all(isinstance(state, GENERATION_PROGRESS) for state in parent_states): + logger.error( + "Invalid generation progress transitions: not all parent states are instances of GENERATION_PROGRESS", + ) + return False + + if not all(isinstance(state, GENERATION_PROGRESS) for state in target_states): + logger.error( + "Invalid generation progress transitions: not all target states are instances of GENERATION_PROGRESS", + ) + return False + + return True + + +class JobState(StrEnum): + """The state of a job.""" + + QUEUED = auto() + """The job has been received and is waiting to be processed.""" + PREPARING = auto() + """The job is being prepared for processing.""" + GENERATING = auto() + """The job is in the process of generating.""" + PENDING_SAFETY_CHECK = auto() + """The job was generated and is pending safety check.""" + PENDING_SUBMIT = auto() + """The job is pending submission.""" + WAITING_ON_NETWORK = auto() + """The job is waiting on network IO.""" + SUCCESSFULLY_COMPLETED = auto() + """The job finished successfully.""" + FAULTED = auto() + """The job faulted. Faulted jobs are ones which failed catastrophically and will not be retried. + + Note: This is different from a generation faulting, which can be submitted as a faulted generation. + """ + + +class HordeWorkerConfigDefaults: + """Default values for HordeWorkerJobConfig.""" + + DEFAULT_MAX_CONSECUTIVE_FAILED_JOB_SUBMITS: int = 3 + """The default maximum number of consecutive times a job can fail to submit to the API before it is marked as + faulted. This is used to prevent a job from being retried indefinitely and to prevent a job from being submitted + well after it would have likely have been marked stale by the API. + + Jobs which are faulted are *abandoned* and no further attempts are made to submit any generations in the job nor to + notify the API that the job failed. + """ + + DEFAULT_JOB_SUBMIT_RETRY_DELAY: float = 2.0 + """The default delay in seconds between retries to submit a job to the API after submit issues.""" + + UNREASONABLE_MAX_CONSECUTIVE_FAILED_JOB_SUBMITS: int = 10 + """The highest number of consecutive failed job submits allowed in any configuration. + + This is used internally to the sdk as a final safeguard to prevent mistakes in configuration. + """ + + DEFAULT_MAX_GENERATION_FAILURES: int = 3 + """The default maximum number of times a generation can fail before it is abandoned. + + **Note:** *Generations* which fail are reported to the API as failed, but the job itself is not + necessarily *faulted*. If notifying the API of a failed generation fails the number of times specified by + `max_consecutive_failed_job_submits`, then the job is marked as faulted and is abandoned. + """ + + UNREASONABLE_MAX_GENERATION_FAILURES: int = 10 + """The highest number of generation failures allowed in any configuration. + + This is used internally to the sdk as a final safeguard to prevent mistakes in configuration. 
+ """ + + DEFAULT_STATE_ERROR_LIMITS: ClassVar[dict[GENERATION_PROGRESS, int]] = { + GENERATION_PROGRESS.PRELOADING: 3, + GENERATION_PROGRESS.GENERATING: 3, + GENERATION_PROGRESS.POST_PROCESSING: 3, + GENERATION_PROGRESS.SAFETY_CHECKING: 3, + GENERATION_PROGRESS.SUBMITTING: 10, + GENERATION_PROGRESS.USER_REQUESTED_ABORT: 10, + } + + DEFAULT_UPLOAD_TIMEOUT: float = 10.0 + DEFAULT_MAX_RETRIES: int = 10 + DEFAULT_RETRY_DELAY: float = 1.0 + + DEFAULT_RESULT_IMAGE_FORMAT: str = "WebP" + DEFAULT_RESULT_IMAGE_QUALITY: int = 95 + DEFAULT_RESULT_IMAGE_PIL_METHOD: int = 6 + + DEFAULT_GENERATION_STRICT_TRANSITION_MODE: bool = True + + +class REQUESTED_BACKEND_CONSTRAINTS(StrEnum): + """What constraints on backends to use were requested by the user/server.""" + + ANY = auto() + """Any backend is acceptable.""" + + SPECIFIED = auto() + """Only the specified backend is acceptable.""" + + DEFAULT_IMAGE = auto() + """Only the default image backend is acceptable.""" + + DEFAULT_TEXT = auto() + """Only the default text backend is acceptable.""" + + DEFAULT_AUDIO = auto() + """Only the default audio backend is acceptable.""" + + DEFAULT_VIDEO = auto() + """Only the default video backend is acceptable.""" + + DEFAULT_ALCHEMY = auto() + """Only the default alchemy backend is acceptable.""" + + NO_CUSTOM = auto() + """Only official backends are acceptable.""" + + ONLY_CUSTOM = auto() + """Only custom backends are acceptable.""" + + +class REQUESTED_SOURCE_IMAGE_FALLBACK_CHOICE(StrEnum): + """The choice for what to do when a requested source image couldn't be parsed or is otherwise unusable.""" + + TXT2IMG_FALLBACK = auto() + """Use txt2img instead if the source image is unusable.""" + + ABANDON = auto() + """Abandon the generation if the source image is unusable.""" + + USE_WHITE_IMAGE = auto() + """Use a white image if the source image is unusable.""" + + USE_BLACK_IMAGE = auto() + """Use a black image if the source image is unusable.""" + + USE_NOISE_IMAGE = auto() + """Use a noise image if the source image is unusable.""" + + +class CHAIN_EDGE_KIND(StrEnum): + """The known chain edges.""" + + CUSTOM = auto() + """A custom chain edge.""" + + RESULTING_IMAGE_AS_SOURCE = auto() + """The resulting image of the previous generation is used as the source for the next generation.""" + + RESULTING_TEXT_AS_PROMPT = auto() + """The resulting text of the previous generation is used as the prompt for the next generation.""" + + RESULTING_IMAGE_AS_MASK = auto() + """The resulting image of the previous generation is used as the mask for the next generation.""" + + RESULTING_IMAGE_AS_CONTROL_MAP = auto() + """The resulting image of the previous generation is used as the control map for the next generation.""" + + IMAGE_TO_ALCHEMY_UPSCALE = auto() + """The resulting image from image generation is used as the source for alchemy upscaling.""" + + IMAGE_TO_ALCHEMY_FACEFIX = auto() + """The resulting image from image generation is used as the source for alchemy face fixing.""" + + ALCHEMY_TO_ALCHEMY = auto() + """The resulting image from one alchemy operation is used as the source for another alchemy operation.""" + + TEXT_TO_IMAGE_PROMPT = auto() + """The resulting text from text generation is used as the prompt for image generation.""" diff --git a/horde_sdk/worker/dispatch/__init__.py b/horde_sdk/worker/dispatch/__init__.py new file mode 100644 index 00000000..2a34419a --- /dev/null +++ b/horde_sdk/worker/dispatch/__init__.py @@ -0,0 +1,5 @@ +"""Contains class definitions and handling for sources of jobs or generation data. 
+ +The prototypical job dispatch source from the horde_sdk's perspective is the official AI-Horde API. However, +support for a variety of other sources is supported, included sources local to a worker computer. +""" diff --git a/horde_sdk/worker/dispatch/ai_horde/__init__.py b/horde_sdk/worker/dispatch/ai_horde/__init__.py new file mode 100644 index 00000000..dcea5b3a --- /dev/null +++ b/horde_sdk/worker/dispatch/ai_horde/__init__.py @@ -0,0 +1 @@ +"""Contains handling for converting AI-Horde API object models to more general SDK object models.""" diff --git a/horde_sdk/worker/dispatch/ai_horde/alchemy/__init__.py b/horde_sdk/worker/dispatch/ai_horde/alchemy/__init__.py new file mode 100644 index 00000000..a69dcf8f --- /dev/null +++ b/horde_sdk/worker/dispatch/ai_horde/alchemy/__init__.py @@ -0,0 +1 @@ +"""The AI-Horde API specific parsing module for alchemy generation.""" diff --git a/horde_sdk/worker/dispatch/ai_horde/alchemy/convert.py b/horde_sdk/worker/dispatch/ai_horde/alchemy/convert.py new file mode 100644 index 00000000..382157cd --- /dev/null +++ b/horde_sdk/worker/dispatch/ai_horde/alchemy/convert.py @@ -0,0 +1,135 @@ +"""Contains functions to convert API responses to alchemy generation parameters.""" + +from horde_sdk.ai_horde_api.apimodels import AlchemyJobPopResponse, NoValidAlchemyFound +from horde_sdk.consts import KNOWN_ALCHEMY_BACKEND, KNOWN_DISPATCH_SOURCE, KNOWN_NSFW_DETECTOR +from horde_sdk.generation_parameters.alchemy import ( + AlchemyParameters, + CaptionAlchemyParameters, + FacefixAlchemyParameters, + InterrogateAlchemyParameters, + NSFWAlchemyParameters, + SingleAlchemyParameters, + UpscaleAlchemyParameters, +) +from horde_sdk.generation_parameters.alchemy.consts import ( + KNOWN_ALCHEMY_FORMS, + KNOWN_CAPTION_MODELS, + KNOWN_INTERROGATORS, + is_caption_form, + is_facefixer_form, + is_interrogator_form, + is_nsfw_detector_form, + is_upscaler_form, +) +from horde_sdk.utils.image_utils import ( + base64_str_to_bytes, +) +from horde_sdk.worker.consts import ( + REQUESTED_BACKEND_CONSTRAINTS, + REQUESTED_SOURCE_IMAGE_FALLBACK_CHOICE, +) +from horde_sdk.worker.dispatch.ai_horde_parameters import AIHordeR2DispatchParameters + + +def convert_alchemy_job_pop_response_to_parameters( + api_response: AlchemyJobPopResponse, +) -> tuple[AlchemyParameters, AIHordeR2DispatchParameters]: + """Convert an API response to the parameters for a generation.""" + if api_response.forms is None or len(api_response.forms) == 0: + raise ValueError("The API response did not contain any forms. 
Was this a skipped response?") + + parsed_upscalers: list[UpscaleAlchemyParameters] = [] + parsed_facefixers: list[FacefixAlchemyParameters] = [] + parsed_interrogators: list[InterrogateAlchemyParameters] = [] + parsed_captions: list[CaptionAlchemyParameters] = [] + parsed_nsfw_detectors: list[NSFWAlchemyParameters] = [] + + parsed_unknown_forms: list[SingleAlchemyParameters] = [] + + r2_upload_url_map = {} + + for form in api_response.forms: + if form.source_image is None: + raise ValueError("The API response did not contain a source image for a form.") + + r2_upload_url_map[str(form.id_)] = form.r2_upload + + if is_upscaler_form(form.form): + parsed_upscalers.append( + UpscaleAlchemyParameters( + result_id=str(form.id_), + form=KNOWN_ALCHEMY_FORMS.post_process, + source_image=base64_str_to_bytes(form.source_image), + upscaler=form.form, + ), + ) + + elif is_facefixer_form(form.form): + parsed_facefixers.append( + FacefixAlchemyParameters( + result_id=str(form.id_), + form=KNOWN_ALCHEMY_FORMS.post_process, + source_image=base64_str_to_bytes(form.source_image), + facefixer=form.form, + ), + ) + + elif is_interrogator_form(form.form): + parsed_interrogators.append( + InterrogateAlchemyParameters( + result_id=str(form.id_), + form=KNOWN_ALCHEMY_FORMS.interrogation, + source_image=base64_str_to_bytes(form.source_image), + interrogator=KNOWN_INTERROGATORS.vit_l_14, + ), + ) + + elif is_caption_form(form.form): + parsed_captions.append( + CaptionAlchemyParameters( + result_id=str(form.id_), + form=KNOWN_ALCHEMY_FORMS.caption, + source_image=base64_str_to_bytes(form.source_image), + caption_model=KNOWN_CAPTION_MODELS.BLIP_BASE_SALESFORCE, + ), + ) + + elif is_nsfw_detector_form(form.form): + parsed_nsfw_detectors.append( + NSFWAlchemyParameters( + result_id=str(form.id_), + form=KNOWN_ALCHEMY_FORMS.nsfw, + source_image=base64_str_to_bytes(form.source_image), + nsfw_detector=KNOWN_NSFW_DETECTOR.HORDE_SAFETY, + ), + ) + + else: + parsed_unknown_forms.append( + SingleAlchemyParameters( + result_id=str(form.id_), + form=form.form, + source_image=base64_str_to_bytes(form.source_image), + ), + ) + + alchemy_parameters = AlchemyParameters( + upscalers=parsed_upscalers or None, + facefixers=parsed_facefixers or None, + interrogators=parsed_interrogators or None, + captions=parsed_captions or None, + nsfw_detectors=parsed_nsfw_detectors or None, + misc_post_processors=parsed_unknown_forms or None, + ) + + dispatch_parameters = AIHordeR2DispatchParameters( + generation_ids=[str(form.id_) for form in api_response.forms], + dispatch_source=KNOWN_DISPATCH_SOURCE.AI_HORDE_API_OFFICIAL, + inference_backend=KNOWN_ALCHEMY_BACKEND.HORDE_ALCHEMIST, + requested_backend_constraints=REQUESTED_BACKEND_CONSTRAINTS.SPECIFIED, + no_valid_request_found_reasons=api_response.skipped or NoValidAlchemyFound(), + r2_upload_url_map=r2_upload_url_map, + source_image_fallback_choice=REQUESTED_SOURCE_IMAGE_FALLBACK_CHOICE.ABANDON, + ) + + return alchemy_parameters, dispatch_parameters diff --git a/horde_sdk/ai_horde_worker/bridge_data.py b/horde_sdk/worker/dispatch/ai_horde/bridge_data.py similarity index 97% rename from horde_sdk/ai_horde_worker/bridge_data.py rename to horde_sdk/worker/dispatch/ai_horde/bridge_data.py index 7e25905d..00a11be4 100644 --- a/horde_sdk/ai_horde_worker/bridge_data.py +++ b/horde_sdk/worker/dispatch/ai_horde/bridge_data.py @@ -10,9 +10,9 @@ from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator from strenum import StrEnum -from horde_sdk.ai_horde_api.consts import 
ALCHEMY_FORMS -from horde_sdk.ai_horde_worker.locale_info.bridge_data_fields import BRIDGE_DATA_FIELD_DESCRIPTIONS +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_FORMS from horde_sdk.generic_api.consts import ANON_API_KEY +from horde_sdk.worker.locale_info.bridge_data_fields import BRIDGE_DATA_FIELD_DESCRIPTIONS _UNREASONABLE_NUMBER_OF_MODELS = 1000 """1000""" @@ -225,6 +225,11 @@ class ImageWorkerBridgeData(SharedHordeBridgeData): """The factor in the equation `max_power * (8 * 64 * 64)`. This will be the maximum number of pixels that can be generated (with inference) in a single job by this worker.""" + @property + def max_pixels(self) -> int: + """Calculate the maximum number of pixels that can be generated in a single job by this worker.""" + return self.max_power * (8 * 64 * 64) + image_models_to_load: list[str] = Field( default_factory=list, alias="models_to_load", @@ -390,7 +395,7 @@ def validate_ram_to_leave_free(cls, v: str | int | float) -> str | int | float: return v @field_validator("forms") - def validate_alchemy_forms(cls, v: list[str]) -> list[str | ALCHEMY_FORMS]: + def validate_alchemy_forms(cls, v: list[str]) -> list[str | KNOWN_ALCHEMY_FORMS]: """Validate the alchemy forms (services offered).""" if not isinstance(v, list): raise ValueError("forms must be a list") @@ -398,7 +403,7 @@ def validate_alchemy_forms(cls, v: list[str]) -> list[str | ALCHEMY_FORMS]: for form in v: form = str(form).lower() form = form.replace("-", "_") - if form not in ALCHEMY_FORMS.__members__: + if form not in KNOWN_ALCHEMY_FORMS.__members__: raise ValueError(f"Invalid form: {form}") validated_forms.append(form) return validated_forms diff --git a/horde_sdk/worker/dispatch/ai_horde/image/__init__.py b/horde_sdk/worker/dispatch/ai_horde/image/__init__.py new file mode 100644 index 00000000..7249d87d --- /dev/null +++ b/horde_sdk/worker/dispatch/ai_horde/image/__init__.py @@ -0,0 +1 @@ +"""The AI-Horde API specific parsing module for image generation.""" diff --git a/horde_sdk/worker/dispatch/ai_horde/image/convert.py b/horde_sdk/worker/dispatch/ai_horde/image/convert.py new file mode 100644 index 00000000..aff84381 --- /dev/null +++ b/horde_sdk/worker/dispatch/ai_horde/image/convert.py @@ -0,0 +1,410 @@ +"""Contains functions to convert API responses to image generation parameters.""" + +from horde_model_reference.meta_consts import ( + KNOWN_IMAGE_GENERATION_BASELINE, + get_baseline_native_resolution, +) +from horde_model_reference.model_reference_manager import ModelReferenceManager +from loguru import logger + +from horde_sdk.ai_horde_api.apimodels.generate.pop import ImageGenerateJobPopResponse +from horde_sdk.ai_horde_api.consts import DEFAULT_HIRES_DENOISE_STRENGTH +from horde_sdk.ai_horde_api.fields import GenerationID +from horde_sdk.consts import KNOWN_DISPATCH_SOURCE, KNOWN_INFERENCE_BACKEND +from horde_sdk.generation_parameters.generic.consts import KNOWN_AUX_MODEL_SOURCE +from horde_sdk.generation_parameters.image import ( + DEFAULT_BASELINE_RESOLUTION, + HIRES_FIX_DENOISE_STRENGTH_DEFAULT, + BasicImageGenerationParameters, + ControlnetGenerationParameters, + CustomWorkflowGenerationParameters, + HiresFixGenerationParameters, + Image2ImageGenerationParameters, + ImageGenerationParameters, + LoRaEntry, + RemixGenerationParameters, + RemixImageEntry, + TIEntry, +) +from horde_sdk.generation_parameters.image.consts import ( + KNOWN_IMAGE_SCHEDULERS, + KNOWN_IMAGE_SOURCE_PROCESSING, + LORA_TRIGGER_INJECT_CHOICE, + TI_TRIGGER_INJECT_CHOICE, +) +from 
horde_sdk.generation_parameters.image.object_models import ImageGenerationComponentContainer +from horde_sdk.utils.image_utils import ( + base64_str_to_bytes, + calc_upscale_sampler_steps, + get_first_pass_image_resolution_by_baseline, +) +from horde_sdk.worker.consts import ( + REQUESTED_BACKEND_CONSTRAINTS, + REQUESTED_SOURCE_IMAGE_FALLBACK_CHOICE, +) +from horde_sdk.worker.dispatch.ai_horde_parameters import AIHordeR2DispatchParameters + + +def _get_img2img_params(api_response: ImageGenerateJobPopResponse) -> Image2ImageGenerationParameters | None: + """Get the image-to-image parameters from the API response, if applicable.""" + if api_response.source_processing in [ + KNOWN_IMAGE_SOURCE_PROCESSING.img2img, + KNOWN_IMAGE_SOURCE_PROCESSING.inpainting, + KNOWN_IMAGE_SOURCE_PROCESSING.outpainting, + ]: + source_image: bytes | None = None + if isinstance(api_response.source_image, str): + source_image = base64_str_to_bytes(api_response.source_image) + + source_mask: bytes | None = None + if isinstance(api_response.source_mask, str): + source_mask = base64_str_to_bytes(api_response.source_mask) + + if source_image is None: + logger.warning("No source image found for img2img generation. Avoiding img2img if possible.") + return None + + return Image2ImageGenerationParameters( + source_image=source_image, + source_mask=source_mask, + ) + + return None + + +def _get_remix_params(api_response: ImageGenerateJobPopResponse) -> RemixGenerationParameters | None: + """Get the remix parameters from the API response, if applicable.""" + if api_response.source_processing == KNOWN_IMAGE_SOURCE_PROCESSING.remix: + source_image: bytes | None = None + if isinstance(api_response.source_image, str): + source_image = base64_str_to_bytes(api_response.source_image) + + if source_image is None: + raise ValueError("No source image found for remix generation.") + + remix_images: list[RemixImageEntry] = [] + if api_response.extra_source_images is not None: + for remix_image in api_response.extra_source_images: + payload_image = remix_image.image + + payload_image_bytes: bytes | None = None + if isinstance(payload_image, str): + payload_image_bytes = base64_str_to_bytes(payload_image) + + if not payload_image_bytes: + raise ValueError("Remix image payload is empty.") + + remix_images.append( + RemixImageEntry( + image=payload_image_bytes, + strength=remix_image.strength, + ), + ) + + return RemixGenerationParameters( + source_image=source_image, + remix_images=remix_images, + ) + + return None + + +def _get_controlnet_params(api_response: ImageGenerateJobPopResponse) -> ControlnetGenerationParameters | None: + """Get the controlnet parameters from the API response, if applicable.""" + if api_response.payload.control_type is not None: + source_image: bytes | None = None + + if isinstance(api_response.source_image, str): + source_image = base64_str_to_bytes(api_response.source_image) + + if api_response.payload.image_is_control: + controlnet_params = ControlnetGenerationParameters( + source_image=None, + controlnet_type=api_response.payload.control_type, + control_map=source_image, + ) + else: + controlnet_params = ControlnetGenerationParameters( + source_image=source_image, + controlnet_type=api_response.payload.control_type, + control_map=None, + ) + + return controlnet_params + + return None + + +def _get_hires_fix_params( + api_response: ImageGenerateJobPopResponse, + model_baseline: KNOWN_IMAGE_GENERATION_BASELINE | None = None, +) -> HiresFixGenerationParameters | None: + """Get the high-resolution fix parameters 
from the API response, if applicable.""" + first_pass_width, first_pass_height = get_first_pass_image_resolution_by_baseline( + width=api_response.payload.width, + height=api_response.payload.height, + baseline=model_baseline, + ) + + second_pass_width = api_response.payload.width + second_pass_height = api_response.payload.height + + second_pass_steps = calc_upscale_sampler_steps( + model_native_resolution=( + get_baseline_native_resolution(model_baseline) if model_baseline else DEFAULT_BASELINE_RESOLUTION + ), + width=api_response.payload.width, + height=api_response.payload.height, + hires_fix_denoising_strength=( + api_response.payload.hires_fix_denoising_strength or HIRES_FIX_DENOISE_STRENGTH_DEFAULT + ), + ddim_steps=api_response.payload.ddim_steps, + ) + + model = api_response.model + + if not model or model.isspace(): + raise ValueError("Model is required for hires fix generation.") + + prompt = api_response.payload.prompt + if not prompt or prompt.isspace(): + raise ValueError("Prompt is required for hires fix generation.") + + if api_response.payload.hires_fix: + return HiresFixGenerationParameters( + first_pass=BasicImageGenerationParameters( + model=model, + model_baseline=model_baseline, + # model_filename=None, # TODO + # model_hash=None, # TODO + prompt=prompt, + seed=api_response.payload.seed, + width=first_pass_width, + height=first_pass_height, + steps=api_response.payload.ddim_steps, + cfg_scale=api_response.payload.cfg_scale, + sampler_name=api_response.payload.sampler_name, + scheduler=( + KNOWN_IMAGE_SCHEDULERS.karras if api_response.payload.karras else KNOWN_IMAGE_SCHEDULERS.normal + ), + clip_skip=api_response.payload.clip_skip, + denoising_strength=api_response.payload.denoising_strength, + ), + second_pass=BasicImageGenerationParameters( + model=model, + model_baseline=model_baseline, + # model_filename=None, # TODO + # model_hash=None, # TODO + prompt=prompt, + seed=api_response.payload.seed, + width=second_pass_width, + height=second_pass_height, + steps=second_pass_steps, + cfg_scale=api_response.payload.cfg_scale, + sampler_name=api_response.payload.sampler_name, + scheduler=( + KNOWN_IMAGE_SCHEDULERS.karras if api_response.payload.karras else KNOWN_IMAGE_SCHEDULERS.normal + ), + clip_skip=api_response.payload.clip_skip, + denoising_strength=api_response.payload.hires_fix_denoising_strength or DEFAULT_HIRES_DENOISE_STRENGTH, + ), + ) + + return None + + +def _get_custom_workflow_params( + api_response: ImageGenerateJobPopResponse, +) -> CustomWorkflowGenerationParameters | None: + """Get the custom workflow parameters from the API response, if applicable.""" + if api_response.payload.workflow is not None: + return CustomWorkflowGenerationParameters( + custom_workflow_name=api_response.payload.workflow, + custom_parameters=None, + custom_workflow_version=None, + ) + + return None + + +def _get_lora_params(api_response: ImageGenerateJobPopResponse) -> list[LoRaEntry] | None: + """Get the LoRa parameters from the API response, if applicable.""" + if api_response.payload.loras is not None: + loras = [] + for lora in api_response.payload.loras: + trigger_inject_choice = LORA_TRIGGER_INJECT_CHOICE.NO_INJECT + + if lora.inject_trigger is not None: + trigger_inject_choice = LORA_TRIGGER_INJECT_CHOICE.FUZZY_POSITIVE + + if lora.is_version: + loras.append( + LoRaEntry( + name=None, + release_version=None, + remote_version_id=lora.name, + source=KNOWN_AUX_MODEL_SOURCE.CIVITAI, + model_strength=lora.model, + clip_strength=lora.clip, + 
lora_inject_trigger_choice=trigger_inject_choice, + lora_triggers=[lora.inject_trigger] if lora.inject_trigger is not None else None, + ), + ) + else: + loras.append( + LoRaEntry( + name=lora.name, + release_version=None, + remote_version_id=None, + source=KNOWN_AUX_MODEL_SOURCE.CIVITAI, + model_strength=lora.model, + clip_strength=lora.clip, + lora_inject_trigger_choice=trigger_inject_choice, + lora_triggers=[lora.inject_trigger] if lora.inject_trigger is not None else None, + ), + ) + + return loras + + return None + + +def _get_ti_params(api_response: ImageGenerateJobPopResponse) -> list[TIEntry] | None: + """Get the TI parameters from the API response, if applicable.""" + if api_response.payload.tis is not None: + tis = [] + for ti in api_response.payload.tis: + ti_trigger_inject_choice = TI_TRIGGER_INJECT_CHOICE.NO_INJECT + + if ti.inject_ti == "prompt": + ti_trigger_inject_choice = TI_TRIGGER_INJECT_CHOICE.POSITIVE_PROMPT + elif ti.inject_ti == "negprompt": + ti_trigger_inject_choice = TI_TRIGGER_INJECT_CHOICE.NEGATIVE_PROMPT + + tis.append( + TIEntry( + name=ti.name, + remote_version_id=None, + source=KNOWN_AUX_MODEL_SOURCE.HORDELING, + ti_inject_trigger_choice=ti_trigger_inject_choice, + model_strength=ti.strength, + ), + ) + + return tis + + return None + + +def convert_image_job_pop_response_to_parameters( + api_response: ImageGenerateJobPopResponse, + model_reference_manager: ModelReferenceManager, +) -> tuple[ImageGenerationParameters, AIHordeR2DispatchParameters]: + """Convert an API response to the parameters for a generation.""" + if api_response.model is None: + raise ValueError("Model is required for generation.") + + model_record = model_reference_manager.image_generation_models.get(api_response.model) + model_baseline: KNOWN_IMAGE_GENERATION_BASELINE | None = None + + if model_record is not None: + try: + model_baseline = KNOWN_IMAGE_GENERATION_BASELINE(model_record.baseline) + except ValueError: + logger.debug( + f"Invalid baseline {model_record.baseline} for model {api_response.model}. 
Using None instead.", + ) + model_baseline = None + + # model_version = model_record.version # TODO + + prompt = api_response.payload.prompt + if not prompt or prompt.isspace(): + raise ValueError("Prompt is required for generation.") + + base_params = BasicImageGenerationParameters( + model=api_response.model, + model_baseline=model_baseline, + # model_version=model_version, + # model_filename=None, # TODO + # model_hash=None, # TODO + prompt=prompt, + seed=api_response.payload.seed, + height=api_response.payload.height, + width=api_response.payload.width, + steps=api_response.payload.ddim_steps, + cfg_scale=api_response.payload.cfg_scale, + sampler_name=api_response.payload.sampler_name, + scheduler=KNOWN_IMAGE_SCHEDULERS.karras if api_response.payload.karras else KNOWN_IMAGE_SCHEDULERS.normal, + clip_skip=api_response.payload.clip_skip, + denoising_strength=api_response.payload.denoising_strength, + tiling=api_response.payload.tiling, + ) + + img2img_params: Image2ImageGenerationParameters | None = _get_img2img_params(api_response) + remix_params: RemixGenerationParameters | None = _get_remix_params(api_response) + controlnet_params: ControlnetGenerationParameters | None = _get_controlnet_params(api_response) + hires_fix_params: HiresFixGenerationParameters | None = _get_hires_fix_params(api_response, model_baseline) + custom_workflow_params: CustomWorkflowGenerationParameters | None = _get_custom_workflow_params(api_response) + + loras: list[LoRaEntry] | None = _get_lora_params(api_response) + tis: list[TIEntry] | None = _get_ti_params(api_response) + + raw_uuids = [id_.root for id_ in api_response.ids] + + additional_params: list[ + Image2ImageGenerationParameters + | RemixGenerationParameters + | ControlnetGenerationParameters + | HiresFixGenerationParameters + | LoRaEntry + | TIEntry + | CustomWorkflowGenerationParameters + ] = [] + + if img2img_params is not None: + additional_params.append(img2img_params) + if remix_params is not None: + additional_params.append(remix_params) + if controlnet_params is not None: + additional_params.append(controlnet_params) + if hires_fix_params is not None: + additional_params.append(hires_fix_params) + if loras is not None: + additional_params.extend(loras) + if tis is not None: + additional_params.extend(tis) + if custom_workflow_params is not None: + additional_params.append(custom_workflow_params) + + image_generation_parameters = ImageGenerationParameters( + result_ids=raw_uuids, + batch_size=api_response.payload.n_iter, + source_processing=api_response.source_processing, + base_params=base_params, + additional_params=ImageGenerationComponentContainer( + components=additional_params, + ), + ) + + r2_upload_url_map = {} + + if api_response.r2_upload is not None: + r2_upload_url_map[api_response.id_] = api_response.r2_upload + elif api_response.r2_uploads is not None: + r2_upload_url_map = dict(zip(api_response.ids, api_response.r2_uploads, strict=True)) + else: + raise ValueError("No R2 upload URL found in the API response.") + + ai_horde_dispatch_parameters = AIHordeR2DispatchParameters( + generation_ids=[GenerationID(root=uuid_) for uuid_ in raw_uuids], + dispatch_source=KNOWN_DISPATCH_SOURCE.AI_HORDE_API_OFFICIAL, + ttl=api_response.ttl, + inference_backend=KNOWN_INFERENCE_BACKEND.COMFYUI, + requested_backend_constraints=REQUESTED_BACKEND_CONSTRAINTS.SPECIFIED, + no_valid_request_found_reasons=api_response.skipped, + source_image_fallback_choice=REQUESTED_SOURCE_IMAGE_FALLBACK_CHOICE.TXT2IMG_FALLBACK, + r2_upload_url_map=r2_upload_url_map, 
+ ) + + return image_generation_parameters, ai_horde_dispatch_parameters diff --git a/horde_sdk/worker/dispatch/ai_horde/multimodal/__init__.py b/horde_sdk/worker/dispatch/ai_horde/multimodal/__init__.py new file mode 100644 index 00000000..2ebd44df --- /dev/null +++ b/horde_sdk/worker/dispatch/ai_horde/multimodal/__init__.py @@ -0,0 +1 @@ +"""The AI-Horde API specific parsing module for multimodal generation.""" diff --git a/horde_sdk/worker/dispatch/ai_horde/pop_strategy.py b/horde_sdk/worker/dispatch/ai_horde/pop_strategy.py new file mode 100644 index 00000000..f917f31f --- /dev/null +++ b/horde_sdk/worker/dispatch/ai_horde/pop_strategy.py @@ -0,0 +1,121 @@ +from typing import override + +from horde_model_reference.model_reference_manager import ModelReferenceManager + +from horde_sdk import KNOWN_DISPATCH_SOURCE, RequestErrorResponse +from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIAsyncClientSession, AIHordeAPIClientSession +from horde_sdk.ai_horde_api.apimodels import ImageGenerateJobPopRequest, ImageGenerateJobPopResponse +from horde_sdk.consts import WORKER_TYPE +from horde_sdk.generation_parameters import ImageGenerationParameters +from horde_sdk.utils import default_bridge_agent_string +from horde_sdk.worker.dispatch.ai_horde.bridge_data import ImageWorkerBridgeData +from horde_sdk.worker.dispatch.ai_horde.image.convert import convert_image_job_pop_response_to_parameters +from horde_sdk.worker.dispatch.pop_strategy import JobPopStrategyGeneric +from horde_sdk.worker.generations import ( + ImageSingleGeneration, +) +from horde_sdk.worker.jobs import ImageWorkerJob + + +class AIHordeImageWorkerJobPopStrategy(JobPopStrategyGeneric[ImageSingleGeneration, ImageGenerationParameters]): + """Job pop strategy for AI Horde image worker jobs.""" + + _image_worker_bridge_data: ImageWorkerBridgeData + _bridge_agent_string: str = default_bridge_agent_string + + _sync_client_session: AIHordeAPIClientSession | None + _async_client_session: AIHordeAPIAsyncClientSession | None + + _model_reference_manager: ModelReferenceManager + + def __init__( + self, + default_job_pop_time_spacing: float = JobPopStrategyGeneric._default_job_pop_time_spacing, + *, + image_worker_bridge_data: ImageWorkerBridgeData, + bridge_agent_string: str = default_bridge_agent_string, + sync_client_session: AIHordeAPIClientSession | None = None, + async_client_session: AIHordeAPIAsyncClientSession | None = None, + model_reference_manager: ModelReferenceManager, + ) -> None: + """Initialize the AI Horde image worker job pop strategy. + + Args: + default_job_pop_time_spacing (float): Default minimum time spacing between job pops in seconds. + image_worker_bridge_data (ImageWorkerBridgeData): The bridge data for the image worker. + bridge_agent_string (str): The bridge agent string to use for the worker. + sync_client_session (AIHordeAPIClientSession | None): Optional synchronous client session for API calls. + async_client_session (AIHordeAPIAsyncClientSession | None): Optional asynchronous client session for API + calls. + model_reference_manager (ModelReferenceManager): The model reference manager for handling model references. 
+ """ + super().__init__(default_job_pop_time_spacing) + + self._bridge_agent_string = bridge_agent_string + + self._image_worker_bridge_data = image_worker_bridge_data + + self._sync_client_session = sync_client_session + self._async_client_session = async_client_session + + self._model_reference_manager = model_reference_manager + + @override + def get_worker_type(self) -> WORKER_TYPE: + return WORKER_TYPE.image + + @override + def get_dispatch_source(self) -> KNOWN_DISPATCH_SOURCE: + return KNOWN_DISPATCH_SOURCE.AI_HORDE_API_OFFICIAL + + @override + def pop_job(self) -> ImageWorkerJob | None: + if self._sync_client_session is None: + raise ValueError("Synchronous client session is not available.") + + job_pop_request = ImageGenerateJobPopRequest( + apikey=self._image_worker_bridge_data.api_key, + name=self._image_worker_bridge_data.dreamer_worker_name, + models=self._image_worker_bridge_data.image_models_to_load, + max_pixels=self._image_worker_bridge_data.max_pixels, + bridge_agent=self._bridge_agent_string, + blacklist=self._image_worker_bridge_data.blacklist, + nsfw=self._image_worker_bridge_data.nsfw, + threads=self._image_worker_bridge_data.max_threads, + require_upfront_kudos=self._image_worker_bridge_data.require_upfront_kudos, + allow_img2img=self._image_worker_bridge_data.allow_img2img, + allow_painting=self._image_worker_bridge_data.allow_inpainting, + allow_unsafe_ipaddr=self._image_worker_bridge_data.allow_unsafe_ip, + allow_post_processing=self._image_worker_bridge_data.allow_post_processing, + allow_controlnet=self._image_worker_bridge_data.allow_controlnet, + allow_sdxl_controlnet=self._image_worker_bridge_data.allow_sdxl_controlnet, + extra_slow_worker=self._image_worker_bridge_data.extra_slow_worker, + limit_max_steps=self._image_worker_bridge_data.limit_max_steps, + allow_lora=self._image_worker_bridge_data.allow_lora, + amount=self._image_worker_bridge_data.max_batch, + ) + + job_pop_response = self._sync_client_session.submit_request( + job_pop_request, + ImageGenerateJobPopResponse, + ) + + if isinstance(job_pop_response, RequestErrorResponse): + print(f"Error popping job: {job_pop_response.message}") + return None + + generation, ai_horde_dispatch_parameters = convert_image_job_pop_response_to_parameters( + api_response=job_pop_response, + model_reference_manager=self._model_reference_manager, + ) + + raise NotImplementedError("Conversion from job pop response to parameters is not implemented.") # FIXME + + return ImageWorkerJob( + generation=generation, + ai_horde_dispatch_parameters=ai_horde_dispatch_parameters, + ) + + @override + async def async_pop_job(self) -> ImageWorkerJob | None: + pass diff --git a/horde_sdk/worker/dispatch/ai_horde/text/__init__.py b/horde_sdk/worker/dispatch/ai_horde/text/__init__.py new file mode 100644 index 00000000..4abc9317 --- /dev/null +++ b/horde_sdk/worker/dispatch/ai_horde/text/__init__.py @@ -0,0 +1 @@ +"""The AI-Horde API specific parsing module for text generation.""" diff --git a/horde_sdk/worker/dispatch/ai_horde/text/convert.py b/horde_sdk/worker/dispatch/ai_horde/text/convert.py new file mode 100644 index 00000000..ec2e1e7b --- /dev/null +++ b/horde_sdk/worker/dispatch/ai_horde/text/convert.py @@ -0,0 +1,63 @@ +"""Contains functions to convert API responses to text generation parameters.""" + +from horde_sdk.ai_horde_api.apimodels.generate.text.pop import TextGenerateJobPopResponse +from horde_sdk.consts import KNOWN_DISPATCH_SOURCE, KNOWN_INFERENCE_BACKEND +from horde_sdk.generation_parameters.text import ( + 
BasicTextGenerationFormatParameters, + BasicTextGenerationParameters, + KoboldAITextGenerationParameters, +) +from horde_sdk.worker.consts import ( + REQUESTED_BACKEND_CONSTRAINTS, +) +from horde_sdk.worker.dispatch.ai_horde_parameters import AIHordeDispatchParameters + + +def convert_text_job_pop_response_to_parameters( + api_response: TextGenerateJobPopResponse, +) -> tuple[KoboldAITextGenerationParameters, AIHordeDispatchParameters]: + """Convert a text API response to the parameters for a generation.""" + dispatch_parameters = AIHordeDispatchParameters( + generation_ids=[str(id_) for id_ in api_response.ids], + dispatch_source=KNOWN_DISPATCH_SOURCE.AI_HORDE_API_OFFICIAL, + ttl=api_response.ttl, + inference_backend=KNOWN_INFERENCE_BACKEND.IN_MODEL_NAME, + requested_backend_constraints=REQUESTED_BACKEND_CONSTRAINTS.ANY, + no_valid_request_found_reasons=api_response.skipped, + ) + + generation_parameters = KoboldAITextGenerationParameters( + result_ids=[str(id_) for id_ in api_response.ids], + base_params=BasicTextGenerationParameters( + model=api_response.model, + prompt=api_response.payload.prompt, + soft_prompt=api_response.softprompt, + max_context_length=api_response.payload.max_context_length, + max_length=api_response.payload.max_length, + stop_sequence=api_response.payload.stop_sequence, + temperature=api_response.payload.temperature, + dynamic_temp_exponent=api_response.payload.dynatemp_exponent, + dynamic_temp_range=api_response.payload.dynatemp_range, + tfs=api_response.payload.tfs, + typical=api_response.payload.typical, + sampler_order=api_response.payload.sampler_order, + smoothing_factor=api_response.payload.smoothing_factor, + top_a=api_response.payload.top_a, + top_k=api_response.payload.top_k, + top_p=api_response.payload.top_p, + min_p=api_response.payload.min_p, + rep_pen=api_response.payload.rep_pen, + rep_pen_range=api_response.payload.rep_pen_range, + rep_pen_slope=api_response.payload.rep_pen_slope, + ), + format_params=BasicTextGenerationFormatParameters( + frmtadsnsp=api_response.payload.frmtadsnsp, + frmtrmblln=api_response.payload.frmtrmblln, + frmtrmspch=api_response.payload.frmtrmspch, + frmttriminc=api_response.payload.frmttriminc, + singleline=api_response.payload.singleline, + ), + use_default_bad_words_ids=api_response.payload.use_default_badwordsids, + ) + + return generation_parameters, dispatch_parameters diff --git a/horde_sdk/worker/dispatch/ai_horde_parameters.py b/horde_sdk/worker/dispatch/ai_horde_parameters.py new file mode 100644 index 00000000..96a10752 --- /dev/null +++ b/horde_sdk/worker/dispatch/ai_horde_parameters.py @@ -0,0 +1,35 @@ +"""Contains the implemented classes that encapsulate the parameters for the dispatching of a task to a worker. + +This includes meta-parameters that are API-specific (ids), pertain to API expectations (time to live, etc), +or otherwise required for the worker to complete the task (such as r2 upload URLs). 
+""" + +from collections.abc import Mapping + +from horde_sdk.ai_horde_api.apimodels import NoValidRequestFound +from horde_sdk.ai_horde_api.fields import GenerationID +from horde_sdk.generic_api.apimodels import HordeAPIObjectBaseModel +from horde_sdk.worker.consts import REQUESTED_SOURCE_IMAGE_FALLBACK_CHOICE +from horde_sdk.worker.dispatch.base import DispatchParameterBase + + +class AIHordeDispatchParameters(DispatchParameterBase): + """Base class for all AI Horde dispatch parameter sets.""" + + generation_ids: list[GenerationID] + """The UUIDs for this generation.""" + + no_valid_request_found_reasons: NoValidRequestFound | HordeAPIObjectBaseModel # FIXME + """The reasons why no valid request was found for this worker (as in, no job to dispatch to this worker).""" + + source_image_fallback_choice: REQUESTED_SOURCE_IMAGE_FALLBACK_CHOICE | str = ( + REQUESTED_SOURCE_IMAGE_FALLBACK_CHOICE.ABANDON + ) + """The source image fallback choice to use for this job.""" + + +class AIHordeR2DispatchParameters(AIHordeDispatchParameters): + """Dispatch parameters for R2 tasks.""" + + r2_upload_url_map: Mapping[GenerationID, str] + """The map of GenerationID to R2 upload URLs for this job.""" diff --git a/horde_sdk/worker/dispatch/base.py b/horde_sdk/worker/dispatch/base.py new file mode 100644 index 00000000..60729a3b --- /dev/null +++ b/horde_sdk/worker/dispatch/base.py @@ -0,0 +1,35 @@ +"""Contains the class bases that encapsulate the parameters for the dispatching of a task to a worker. + +This includes meta-parameters that are API-specific (ids), pertain to API expectations (time to live, etc), +or otherwise required for the worker to complete the task (such as r2 upload URLs). +""" + +from __future__ import annotations + +from typing import Any + +from pydantic import BaseModel + +from horde_sdk.consts import KNOWN_DISPATCH_SOURCE, KNOWN_INFERENCE_BACKEND +from horde_sdk.worker.consts import ( + REQUESTED_BACKEND_CONSTRAINTS, +) + + +class DispatchParameterBase(BaseModel): + """Base class for all dispatch parameter sets.""" + + generation_ids: list[Any] + """The identifiers for this generation. The request is expected to yield as many results as there are IDs.""" + + dispatch_source: KNOWN_DISPATCH_SOURCE | str = KNOWN_DISPATCH_SOURCE.LOCAL_CUSTOM_3RD_PARTY + """The source of the dispatch request. Defaults to KNOWN_DISPATCH_SOURCE.LOCAL_CUSTOM_3RD_PARTY.""" + + ttl: int | None = None + """The amount of seconds before this job is considered stale and aborted on the server. Defaults to None.""" + + inference_backend: KNOWN_INFERENCE_BACKEND | str | None = None + """The inference backend to use for this job. Defaults to None.""" + + requested_backend_constraints: REQUESTED_BACKEND_CONSTRAINTS | str = REQUESTED_BACKEND_CONSTRAINTS.ANY + """User/server request constraints on which backend to use. 
Defaults to REQUESTED_BACKEND_CONSTRAINTS.ANY.""" diff --git a/horde_sdk/worker/dispatch/pop_strategy.py b/horde_sdk/worker/dispatch/pop_strategy.py new file mode 100644 index 00000000..1ff8cc60 --- /dev/null +++ b/horde_sdk/worker/dispatch/pop_strategy.py @@ -0,0 +1,60 @@ +from abc import ABC, abstractmethod +from typing import Any + +from horde_sdk import KNOWN_DISPATCH_SOURCE +from horde_sdk.consts import WORKER_TYPE +from horde_sdk.generation_parameters.generic import CompositeParametersBase +from horde_sdk.worker.generations_base import HordeSingleGeneration +from horde_sdk.worker.job_base import HordeWorkerJob + + +class JobPopStrategyGeneric[SingleGenerationTypeVar, ComposedParameterSetTypeVar](ABC): + """Abstract base class for job pop strategies.""" + + _default_job_pop_time_spacing: float = 1.0 + """Default minimum time spacing between job pops in seconds.""" + + def __init__( + self, + default_job_pop_time_spacing: float = _default_job_pop_time_spacing, + ) -> None: + """Initialize the job pop strategy.""" + self._default_job_pop_time_spacing = default_job_pop_time_spacing + + @abstractmethod + def get_worker_type(self) -> WORKER_TYPE: + """Get the worker type associated with this job pop strategy. + + Returns: + WORKER_TYPE: The worker type. + """ + + @abstractmethod + def get_dispatch_source(self) -> KNOWN_DISPATCH_SOURCE | str: + """Get the dispatch source associated with this job pop strategy. + + Returns: + KNOWN_DISPATCH_SOURCE | str: The dispatch source. + """ + + @abstractmethod + def pop_job(self) -> HordeWorkerJob[HordeSingleGeneration[Any], CompositeParametersBase] | None: + """Pop a job synchronously from the dispatch source. + + Use `async_pop_job` for asynchronous operations. + + Returns: + HordeWorkerJob[HordeSingleGeneration[Any], CompositeParametersBase] | None: The popped job or `None` if + no job is available. + """ + + @abstractmethod + async def async_pop_job(self) -> HordeWorkerJob[HordeSingleGeneration[Any], CompositeParametersBase] | None: + """Pop a job asynchronously from the dispatch source. + + Use `pop_job` if you prefer synchronous operations. + + Returns: + HordeWorkerJob[HordeSingleGeneration[Any], CompositeParametersBase] | None: The popped job or `None` if + no job is available. + """ diff --git a/horde_sdk/worker/exceptions.py b/horde_sdk/worker/exceptions.py new file mode 100644 index 00000000..9153ece3 --- /dev/null +++ b/horde_sdk/worker/exceptions.py @@ -0,0 +1,36 @@ +from horde_sdk.consts import ID_TYPES +from horde_sdk.worker.consts import GENERATION_PROGRESS + + +class GenerationStateException(Exception): + """Raised when there is an error in the generation state.""" + + def __init__(self, message: str) -> None: + """Initialize the generation state exception with a given message.""" + super().__init__(message) + + +class GenerationStateErrorLimitExceeded(GenerationStateException): + """Raised when the generation state exceeds the allowed error limit.""" + + def __init__( + self, + generation_id: ID_TYPES, + error_limit: int, + last_non_error_state: GENERATION_PROGRESS, + ) -> None: + """Initialize the state error limit exceeded exception. + + Args: + generation_id (ID_TYPES): The unique identifier for the generation. + error_limit (int): The maximum number of errors allowed. + last_non_error_state (GENERATION_PROGRESS): The last non-error state of the generation. + """ + message = ( + f"Generation {generation_id} has exceeded the error limit of {error_limit}. " + f"Last non-error state was {last_non_error_state}." 
+ ) + super().__init__(message) + self.generation_id = generation_id + self.error_limit = error_limit + self.last_non_error_state = last_non_error_state diff --git a/horde_sdk/worker/feature_flags.py b/horde_sdk/worker/feature_flags.py new file mode 100644 index 00000000..fe5718f2 --- /dev/null +++ b/horde_sdk/worker/feature_flags.py @@ -0,0 +1,501 @@ +from abc import ABC, abstractmethod +from enum import auto +from typing import TypeVar, override + +from horde_model_reference.meta_consts import KNOWN_IMAGE_GENERATION_BASELINE +from loguru import logger +from pydantic import BaseModel, Field +from strenum import StrEnum + +from horde_sdk import get_default_frozen_model_config_dict +from horde_sdk.generation_parameters.alchemy.consts import ( + is_caption_form, + is_facefixer_form, + is_interrogator_form, + is_nsfw_detector_form, + is_upscaler_form, +) +from horde_sdk.generation_parameters.alchemy.object_models import AlchemyFeatureFlags +from horde_sdk.generation_parameters.generic.object_models import GenerationFeatureFlags +from horde_sdk.generation_parameters.image.consts import ( + CLIP_SKIP_REPRESENTATION, + KNOWN_IMAGE_SAMPLERS, + KNOWN_IMAGE_SCHEDULERS, +) +from horde_sdk.generation_parameters.image.object_models import ImageGenerationFeatureFlags + +ReasonTypeVar = TypeVar("ReasonTypeVar", bound=str) + + +class RESULT_RETURN_METHOD(StrEnum): + """The method of returning results from a worker.""" + + base64_post_back = auto() + """Base64 post back in the 'job completed' message.""" + + base64_post_back_with_url = auto() + """Base64 post back to a given URL without results in the 'job completed' message.""" + + byte_stream = auto() + """Byte stream to a given URL without results in the 'job completed' message.""" + + local_write_to_file = auto() + """Can write to the local filesystem for jobs originating locally or within a closed environment.""" + + +class WorkerFeatureFlags[ReasonTypeVar: str](ABC, BaseModel): + """Feature flags for a worker.""" + + model_config = get_default_frozen_model_config_dict() + + supported_result_return_methods: list[RESULT_RETURN_METHOD] = Field(default_factory=list) + """The methods of returning results supported by the worker.""" + + supports_threads: bool = Field(default=False) + """Whether the worker supports threading.""" + + def is_capable_of_features(self, features: GenerationFeatureFlags) -> bool: + """Check if the worker is capable of handling the requested features. + + Args: + features (GenerationFeatureFlags): The features to check. + + Returns: + bool: True if the worker is capable of handling the requested features, False otherwise. + """ + return not self.reasons_not_capable_of_features(features) + + @abstractmethod + def get_not_capable_reason_type(self) -> type[ReasonTypeVar]: + """Return the type of the reason for not being capable of handling the requested features. + + Returns: + type[ReasonTypeVar]: The (python) type of the reason for not being capable of handling the requested + features. + """ + + @abstractmethod + def reasons_not_capable_of_features( + self, + features: GenerationFeatureFlags, + ) -> list[ReasonTypeVar] | None: + """Return a list of reasons why the worker is not capable of handling the requested features. + + Args: + features (GenerationFeatureFlags): The features to check. + + Returns: + list[str] | None: A list of reasons why the worker is not capable of handling the requested features, + or None if the worker is capable. 
+ """ + + +class PerBaselineFeatureFlags(BaseModel): + """Feature flags for a worker per baseline.""" + + model_config = get_default_frozen_model_config_dict() + + schedulers_map: dict[KNOWN_IMAGE_GENERATION_BASELINE | str, list[KNOWN_IMAGE_SCHEDULERS | str]] | None = Field( + default=None, + examples=[ + { + KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_1: [ + KNOWN_IMAGE_SCHEDULERS.simple, + KNOWN_IMAGE_SCHEDULERS.normal, + KNOWN_IMAGE_SCHEDULERS.exponential, + ], + KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_xl: [ + KNOWN_IMAGE_SCHEDULERS.simple, + ], + }, + ], + ) + """If set, the supported schedulers for each baseline. If unset, it is assumed that all baselines + support all schedulers.""" + + samplers_map: dict[KNOWN_IMAGE_GENERATION_BASELINE | str, list[KNOWN_IMAGE_SAMPLERS | str]] | None = Field( + default=None, + examples=[ + { + KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_1: [ + KNOWN_IMAGE_SAMPLERS.k_lms, + KNOWN_IMAGE_SAMPLERS.k_dpm_2, + KNOWN_IMAGE_SAMPLERS.k_euler, + ], + KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_xl: [ + KNOWN_IMAGE_SAMPLERS.k_lms, + ], + }, + ], + ) + """If set, the supported samplers for each baseline. If unset, it is assumed that all baselines + support all samplers.""" + + tiling_map: dict[KNOWN_IMAGE_GENERATION_BASELINE | str, bool] | None = Field( + default=None, + examples=[ + { + KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_1: True, + KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_xl: False, + }, + ], + ) + """If set, the supported tiling for each baseline. If unset, it is assumed that all baselines + support tiling.""" + + hires_fix_map: dict[KNOWN_IMAGE_GENERATION_BASELINE | str, bool] | None = Field( + default=None, + examples=[ + { + KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_1: True, + KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_xl: True, + KNOWN_IMAGE_GENERATION_BASELINE.flux_1: False, + }, + ], + ) + """If set, the supported hires fix for each baseline. If unset, it is assumed that all baselines + support hires fix.""" + + controlnet_map: dict[KNOWN_IMAGE_GENERATION_BASELINE | str, bool] | None = Field( + default=None, + examples=[ + { + KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_1: True, + KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_xl: False, + }, + ], + ) + """If set, support for controlnet for each baseline. If unset, it is assumed that all baselines + support controlnets.""" + + tis_map: dict[KNOWN_IMAGE_GENERATION_BASELINE | str, bool] | None = Field( + default=None, + examples=[ + { + KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_1: True, + KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_xl: False, + }, + ], + ) + """If set, support for TIs for each baseline. If unset, it is assumed that all baselines support + TIs.""" + + loras_map: dict[KNOWN_IMAGE_GENERATION_BASELINE | str, bool] | None = Field( + default=None, + examples=[ + { + KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_1: True, + KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_xl: False, + }, + ], + ) + """If set, support for Loras for each baseline. 
If unset, it is assumed that all baselines + support Loras.""" + + +class IMAGE_WORKER_NOT_CAPABLE_REASON(StrEnum): + """Reasons why a worker is not capable of handling a request.""" + + clip_skip = auto() + """The worker does not support clip skip.""" + + samplers = auto() + """The worker does not support the requested samplers.""" + + schedulers = auto() + """The worker does not support the requested schedulers.""" + + tiling = auto() + """The worker does not support tiling.""" + + hires_fix = auto() + """The worker does not support hires fix.""" + + controlnets = auto() + """The worker does not support controlnets.""" + + tis = auto() + """The worker does not support TIs.""" + + loras = auto() + """The worker does not support Loras.""" + + extra_texts = auto() + """The worker does not support extra texts.""" + + extra_source_images = auto() + """The worker does not support extra source images.""" + + unsupported_baseline = auto() + """The worker does not support the requested baseline.""" + + +class ImageWorkerFeatureFlags(WorkerFeatureFlags[IMAGE_WORKER_NOT_CAPABLE_REASON]): + """Feature flags for an image worker.""" + + image_generation_feature_flags: ImageGenerationFeatureFlags + """The image generation feature flags for the worker.""" + + per_baseline_feature_flags: PerBaselineFeatureFlags | None = None + """The per baseline feature flags for the worker. This includes the supported schedulers and + samplers for each baseline.""" + + backend_clip_skip_representation: CLIP_SKIP_REPRESENTATION | None = None + """The clip skip representation supported.""" + + @override + def get_not_capable_reason_type(self) -> type[IMAGE_WORKER_NOT_CAPABLE_REASON]: + return IMAGE_WORKER_NOT_CAPABLE_REASON + + @override + def reasons_not_capable_of_features( + self, + request: GenerationFeatureFlags, + ) -> list[IMAGE_WORKER_NOT_CAPABLE_REASON] | None: + """Return a list of reasons why a worker is not capable of handling a request.""" + if not isinstance(request, ImageGenerationFeatureFlags): + logger.debug(f"Request is not an ImageGenerationFeatureFlags instance. 
Request type: {type(request)}") + return None + + reasons = [] + + if request.clip_skip and not self.image_generation_feature_flags.clip_skip: + reasons.append(IMAGE_WORKER_NOT_CAPABLE_REASON.clip_skip) + + if not self.worker_supports_requested_samplers(request): + reasons.append(IMAGE_WORKER_NOT_CAPABLE_REASON.samplers) + + if not self.worker_supports_requested_schedulers(request): + reasons.append(IMAGE_WORKER_NOT_CAPABLE_REASON.schedulers) + + if not self.worker_supports_requested_tiling(request): + reasons.append(IMAGE_WORKER_NOT_CAPABLE_REASON.tiling) + + if not self.worker_supports_requested_hires_fix(request): + reasons.append(IMAGE_WORKER_NOT_CAPABLE_REASON.hires_fix) + + if not self.worker_supports_requested_controlnets(request): + reasons.append(IMAGE_WORKER_NOT_CAPABLE_REASON.controlnets) + + if not self.worker_supports_requested_tis(request): + reasons.append(IMAGE_WORKER_NOT_CAPABLE_REASON.tis) + + if not self.worker_supports_requested_loras(request): + reasons.append(IMAGE_WORKER_NOT_CAPABLE_REASON.loras) + + if request.extra_texts and not self.image_generation_feature_flags.extra_texts: + reasons.append(IMAGE_WORKER_NOT_CAPABLE_REASON.extra_texts) + + if request.extra_source_images and not self.image_generation_feature_flags.extra_source_images: + reasons.append(IMAGE_WORKER_NOT_CAPABLE_REASON.extra_source_images) + + if ( + request.baselines + and self.image_generation_feature_flags.baselines + and (not any(baseline in self.image_generation_feature_flags.baselines for baseline in request.baselines)) + ): + reasons.append(IMAGE_WORKER_NOT_CAPABLE_REASON.unsupported_baseline) + + return reasons if reasons else None + + def worker_supports_requested_samplers( + self, + request: ImageGenerationFeatureFlags, + ) -> bool: + """Return True if the worker supports the samplers requested.""" + if self.per_baseline_feature_flags and self.per_baseline_feature_flags.samplers_map: + for baseline in request.baselines or []: + if any( + sampler not in self.per_baseline_feature_flags.samplers_map.get(baseline, []) + for sampler in request.samplers + ): + return False + else: + if any(sampler not in self.image_generation_feature_flags.samplers for sampler in request.samplers): + return False + return True + + def worker_supports_requested_schedulers( + self, + request: ImageGenerationFeatureFlags, + ) -> bool: + """Return True if the worker supports the schedulers requested.""" + if self.per_baseline_feature_flags and self.per_baseline_feature_flags.schedulers_map: + for baseline in request.baselines or []: + if any( + scheduler not in self.per_baseline_feature_flags.schedulers_map.get(baseline, []) + for scheduler in request.schedulers + ): + return False + else: + if any( + scheduler not in self.image_generation_feature_flags.schedulers for scheduler in request.schedulers + ): + return False + return True + + def worker_supports_requested_tiling( + self, + request: ImageGenerationFeatureFlags, + ) -> bool: + """Return True if the worker supports tiling.""" + if self.per_baseline_feature_flags and self.per_baseline_feature_flags.tiling_map: + for baseline in request.baselines or []: + if not self.per_baseline_feature_flags.tiling_map.get(baseline, True): + return False + else: + if request.tiling and not self.image_generation_feature_flags.tiling: + return False + return True + + def worker_supports_requested_hires_fix( + self, + request: ImageGenerationFeatureFlags, + ) -> bool: + """Return True if the worker supports hires fix.""" + if self.per_baseline_feature_flags and 
self.per_baseline_feature_flags.hires_fix_map: + for baseline in request.baselines or []: + if not self.per_baseline_feature_flags.hires_fix_map.get(baseline, True): + return False + else: + if request.hires_fix and not self.image_generation_feature_flags.hires_fix: + return False + return True + + def worker_supports_requested_controlnets( + self, + request: ImageGenerationFeatureFlags, + ) -> bool: + """Return True if the worker supports controlnets.""" + if not request.controlnets_feature_flags: + return True + + if not self.image_generation_feature_flags.controlnets_feature_flags: + return False + + if self.per_baseline_feature_flags and self.per_baseline_feature_flags.controlnet_map: + for baseline in request.baselines or []: + if not self.per_baseline_feature_flags.controlnet_map.get(baseline, True): + return False + + if ( + request.controlnets_feature_flags.image_is_control + and not self.image_generation_feature_flags.controlnets_feature_flags.image_is_control + ): + return False + + if ( # noqa SIM103: For readability, we return this False directly + request.controlnets_feature_flags.return_control_map + and not self.image_generation_feature_flags.controlnets_feature_flags.return_control_map + ): + return False + + return True + + def worker_supports_requested_tis( + self, + request: ImageGenerationFeatureFlags, + ) -> bool: + """Return True if the worker supports TIs.""" + if self.per_baseline_feature_flags and self.per_baseline_feature_flags.tis_map: + for baseline in request.baselines or []: + if not self.per_baseline_feature_flags.tis_map.get(baseline, True): + return False + else: + if request.tis and not self.image_generation_feature_flags.tis: + return False + return True + + def worker_supports_requested_loras( + self, + request: ImageGenerationFeatureFlags, + ) -> bool: + """Return True if the worker supports Loras.""" + if self.per_baseline_feature_flags and self.per_baseline_feature_flags.loras_map: + for baseline in request.baselines or []: + if not self.per_baseline_feature_flags.loras_map.get(baseline, True): + return False + else: + if request.loras and not self.image_generation_feature_flags.loras: + return False + return True + + +# class TextWorkerFeatureFlags(WorkerFeatureFlags[TEXT_WORKER_NOT_CAPABLE_REASON]): +# """Feature flags for a text worker.""" + + +class ALCHEMY_WORKER_NOT_CAPABLE_REASON(StrEnum): + """Reasons why a worker is not capable of handling an alchemy request.""" + + unsupported_upscaler = auto() + """The worker does not support a requested upscaler.""" + + unsupported_facefixer = auto() + """The worker does not support a requested facefixer.""" + + unsupported_interrogator = auto() + """The worker does not support a requested interrogator.""" + + unsupported_caption_model = auto() + """The worker does not support a requested caption model.""" + + unsupported_nsfw_detector = auto() + """The worker does not support a requested NSFW detector.""" + + unsupported_misc = auto() + """The worker does not support a requested miscellaneous feature.""" + + +class AlchemyWorkerFeatureFlags(WorkerFeatureFlags[ALCHEMY_WORKER_NOT_CAPABLE_REASON]): + """Feature flags for an alchemy worker.""" + + alchemy_feature_flags: AlchemyFeatureFlags + + @override + def get_not_capable_reason_type(self) -> type[ALCHEMY_WORKER_NOT_CAPABLE_REASON]: + return ALCHEMY_WORKER_NOT_CAPABLE_REASON + + @override + def reasons_not_capable_of_features( + self, + request: GenerationFeatureFlags, + ) -> list[ALCHEMY_WORKER_NOT_CAPABLE_REASON] | None: + """Return a list of reasons why 
a worker is not capable of handling an alchemy request.""" + if not isinstance(request, AlchemyFeatureFlags): + logger.debug(f"Request is not an AlchemyFeatureFlags instance. Request type: {type(request)}") + return None + + if not self.alchemy_feature_flags: + logger.debug("Worker does not have alchemy feature flags.") + return None + + if not request.alchemy_types: + logger.debug("Request does not have alchemy types.") + return None + + reasons = [] + + for alchemy_type in request.alchemy_types: + if alchemy_type not in self.alchemy_feature_flags.alchemy_types: + if is_upscaler_form(alchemy_type): + reasons.append(ALCHEMY_WORKER_NOT_CAPABLE_REASON.unsupported_upscaler) + elif is_facefixer_form(alchemy_type): + reasons.append(ALCHEMY_WORKER_NOT_CAPABLE_REASON.unsupported_facefixer) + elif is_interrogator_form(alchemy_type): + reasons.append(ALCHEMY_WORKER_NOT_CAPABLE_REASON.unsupported_interrogator) + elif is_caption_form(alchemy_type): + reasons.append(ALCHEMY_WORKER_NOT_CAPABLE_REASON.unsupported_caption_model) + elif is_nsfw_detector_form(alchemy_type): + reasons.append(ALCHEMY_WORKER_NOT_CAPABLE_REASON.unsupported_nsfw_detector) + else: + reasons.append(ALCHEMY_WORKER_NOT_CAPABLE_REASON.unsupported_misc) + return reasons if reasons else None + + +# class AudioWorkerFeatureFlags(WorkerFeatureFlags[AUDIO_WORKER_NOT_CAPABLE_REASON]): +# """Feature flags for an audio worker.""" + + +# class VideoWorkerFeatureFlags(WorkerFeatureFlags[VIDEO_WORKER_NOT_CAPABLE_REASON]): +# """Feature flags for a video worker.""" diff --git a/horde_sdk/worker/generations.py b/horde_sdk/worker/generations.py new file mode 100644 index 00000000..e94132cf --- /dev/null +++ b/horde_sdk/worker/generations.py @@ -0,0 +1,538 @@ +from __future__ import annotations + +from collections.abc import Mapping, Sequence +from typing import TypedDict, override + +from loguru import logger + +from horde_sdk.consts import ID_TYPES +from horde_sdk.generation_parameters import BasicImageGenerationParametersTemplate +from horde_sdk.generation_parameters.alchemy import SingleAlchemyParameters +from horde_sdk.generation_parameters.alchemy.object_models import SingleAlchemyParametersTemplate +from horde_sdk.generation_parameters.image import ImageGenerationParameters +from horde_sdk.generation_parameters.image.object_models import ( + ImageGenerationComponentContainer, + ImageGenerationParametersTemplate, +) +from horde_sdk.generation_parameters.text import TextGenerationParameters +from horde_sdk.generation_parameters.text.object_models import ( + BasicTextGenerationParametersTemplate, + TextGenerationParametersTemplate, +) +from horde_sdk.generation_parameters.utils import ResultIdAllocator +from horde_sdk.safety import SafetyRules, default_image_safety_rules, default_text_safety_rules +from horde_sdk.worker.consts import ( + GENERATION_PROGRESS, + HordeWorkerConfigDefaults, + base_generate_progress_no_submit_transitions, + base_generate_progress_transitions, + default_alchemy_generate_progress_no_submit_transitions, + default_alchemy_generate_progress_transitions, + default_image_generate_progress_no_submit_transitions, + default_image_generate_progress_transitions, + default_text_generate_progress_no_submit_transitions, + default_text_generate_progress_transitions, +) +from horde_sdk.worker.generations_base import HordeSingleGeneration + + +class ImageGenerationInitKwargs(TypedDict, total=False): + """Optional keyword arguments accepted by `ImageSingleGeneration` from template helpers.""" + + generation_id: ID_TYPES | None + 
dispatch_result_ids: Sequence[ID_TYPES] | None + result_ids: list[ID_TYPES] | None + requires_submit: bool + safety_rules: SafetyRules + black_box_mode: bool + state_error_limits: Mapping[GENERATION_PROGRESS, int] | None + strict_transition_mode: bool + extra_logging: bool + + +class AlchemyGenerationInitKwargs(TypedDict, total=False): + """Optional keyword arguments accepted by `AlchemySingleGeneration` factory flows.""" + + generation_id: str | None + dispatch_result_ids: Sequence[ID_TYPES] | None + result_ids: list[ID_TYPES] | None + requires_generation: bool + requires_post_processing: bool + requires_safety_check: bool + requires_submit: bool + safety_rules: SafetyRules + black_box_mode: bool + state_error_limits: Mapping[GENERATION_PROGRESS, int] | None + strict_transition_mode: bool + extra_logging: bool + + +class TextGenerationInitKwargs(TypedDict, total=False): + """Optional keyword arguments accepted by `TextSingleGeneration` helpers.""" + + generation_id: ID_TYPES | None + dispatch_result_ids: Sequence[ID_TYPES] | None + result_ids: list[ID_TYPES] | None + requires_post_processing: bool + requires_safety_check: bool + requires_submit: bool + safety_rules: SafetyRules + black_box_mode: bool + state_error_limits: Mapping[GENERATION_PROGRESS, int] | None + strict_transition_mode: bool + extra_logging: bool + + +def _stringify_id(identifier: ID_TYPES | None) -> str | None: + if identifier is None: + return None + return str(identifier) + + +class ImageSingleGeneration(HordeSingleGeneration[bytes]): + """A single image generation. + + **Not to be confused with a job**, which can contain multiple generations. + """ + + @override + @classmethod + def default_generate_progress_transitions(cls) -> dict[GENERATION_PROGRESS, list[GENERATION_PROGRESS]]: + return default_image_generate_progress_transitions + + @override + @classmethod + def default_generate_progress_transitions_no_submit( + cls, + ) -> dict[GENERATION_PROGRESS, list[GENERATION_PROGRESS]]: + return default_image_generate_progress_no_submit_transitions + + @override + @classmethod + def does_class_require_generation(cls) -> bool: + return True + + @override + @classmethod + def does_class_require_safety_check(cls) -> bool: + return True + + generation_parameters: ImageGenerationParameters + + def __init__( + self, + *, + generation_parameters: ImageGenerationParameters, + generation_id: ID_TYPES | None = None, + dispatch_result_ids: Sequence[ID_TYPES] | None = None, + result_ids: list[ID_TYPES] | None = None, + requires_submit: bool = True, + safety_rules: SafetyRules = default_image_safety_rules, + black_box_mode: bool = False, + state_error_limits: ( + Mapping[GENERATION_PROGRESS, int] | None + ) = HordeWorkerConfigDefaults.DEFAULT_STATE_ERROR_LIMITS, + strict_transition_mode: bool = HordeWorkerConfigDefaults.DEFAULT_GENERATION_STRICT_TRANSITION_MODE, + extra_logging: bool = False, + ) -> None: + """Initialize the generation. + + Args: + generation_parameters (ImageGenerationParameters): The parameters for the generation. + generation_id (str | None): The unique identifier for the generation. \ + If None, a random UUID will be generated. + dispatch_result_ids (Sequence[str | uuid.UUID] | None): Result identifiers assigned by dispatch, if + available. + result_ids (list[ID_TYPES] | None): The unique identifiers for the generation. \ + If None, a random UUID will be generated for each result. + requires_submit (bool, optional): Whether the generation requires submission. \ + Defaults to True. 
+ safety_rules (SafetyRules, optional): The safety rules to apply to the generation. \ + Defaults to default_image_safety_rules from `horde_sdk.safety`. + black_box_mode (bool, optional): Whether the generation is in black box mode. \ + This removes all of the intermediate states between starting and finished states. \ + This should only be used when the backend has no observability into the generation process. \ + Defaults to False. + state_error_limits (Mapping[GENERATION_PROGRESS, int], optional): The maximum number of times a \ + generation can be in an error state before it is considered failed. \ + Defaults to HordeWorkerConfigDefaults.DEFAULT_STATE_ERROR_LIMITS. + strict_transition_mode (bool, optional): Whether or not to enforce strict transition checking. \ + This prevents setting the same state multiple times in a row. \ + Defaults to True. + extra_logging (bool, optional): Whether or not to enable extra debug-level logging, \ + especially for state transitions. \ + Defaults to True. + """ + generate_progress_transitions = self.default_generate_progress_transitions() + + if not requires_submit: + generate_progress_transitions = self.default_generate_progress_transitions_no_submit() + + if result_ids is None and generation_parameters.result_ids is not None: + result_ids = generation_parameters.result_ids + logger.trace( + f"Result IDs were not provided, using result IDs from generation parameters: {result_ids}", + extra={"generation_id": generation_id}, + ) + elif result_ids is not None and generation_parameters.result_ids is not None: + logger.warning( + "Both result IDs and generation parameters result IDs were provided. Using the provided result IDs.", + extra={"generation_id": generation_id}, + ) + + super().__init__( + result_type=bytes, + generation_parameters=generation_parameters, + generation_id=generation_id, + dispatch_result_ids=dispatch_result_ids, + result_ids=result_ids, + requires_generation=ImageSingleGeneration.does_class_require_generation(), + requires_post_processing=generation_parameters.alchemy_params is not None, + requires_safety_check=True, + requires_submit=requires_submit, + safety_rules=safety_rules, + state_error_limits=state_error_limits, + generate_progress_transitions=generate_progress_transitions, + black_box_mode=black_box_mode, + strict_transition_mode=strict_transition_mode, + extra_logging=extra_logging, + ) + + @classmethod + def from_template( + cls, + template: ImageGenerationParametersTemplate, + *, + generation_id: ID_TYPES | None = None, + dispatch_result_ids: Sequence[ID_TYPES] | None = None, + base_param_updates: BasicImageGenerationParametersTemplate | None = None, + additional_param_updates: ImageGenerationComponentContainer | None = None, + result_ids: Sequence[ID_TYPES] | None = None, + allocator: ResultIdAllocator | None = None, + seed: str = "image", + init_kwargs: ImageGenerationInitKwargs | None = None, + ) -> ImageSingleGeneration: + """Instantiate an image generation from a template.""" + generation_parameters = template.to_parameters( + base_param_updates=base_param_updates, + result_ids=result_ids, + allocator=allocator, + seed=seed, + ) + resolved_kwargs: ImageGenerationInitKwargs = {} + if init_kwargs: + resolved_kwargs.update(init_kwargs) + if generation_id is not None: + resolved_kwargs.setdefault("generation_id", generation_id) + if dispatch_result_ids is not None: + resolved_kwargs.setdefault("dispatch_result_ids", list(dispatch_result_ids)) + resolved_kwargs.setdefault("result_ids", generation_parameters.result_ids) 
+ return cls( + generation_parameters=generation_parameters, + **resolved_kwargs, + ) + + +class AlchemySingleGeneration(HordeSingleGeneration[bytes]): + """A single alchemy generation. Alchemy is generally transformative or analytical in nature. + + 'Generation' is used more broadly here than in the context of AI generation for the sake of naming consistency. + + Generally an input might be an image and the output could be anything, such as the input `image` upscaled with a \ + model, caption `text`, or whether the image is NSFW or not (`bool`). + + **Not to be confused with a job**, which can contain multiple generations. + """ + + @override + @classmethod + def default_generate_progress_transitions(cls) -> dict[GENERATION_PROGRESS, list[GENERATION_PROGRESS]]: + return default_alchemy_generate_progress_transitions + + @override + @classmethod + def default_generate_progress_transitions_no_submit( + cls, + ) -> dict[GENERATION_PROGRESS, list[GENERATION_PROGRESS]]: + return default_alchemy_generate_progress_no_submit_transitions + + @override + @classmethod + def does_class_require_generation(cls) -> bool: + return False + + @override + @classmethod + def does_class_require_safety_check(cls) -> bool: + return False + + generation_parameters: SingleAlchemyParameters + """The parameters for the generation.""" + + def __init__( + self, + *, + generation_parameters: SingleAlchemyParameters, + generation_id: str | None = None, + dispatch_result_ids: Sequence[ID_TYPES] | None = None, + result_ids: list[ID_TYPES] | None = None, + requires_generation: bool = False, + requires_post_processing: bool = True, + requires_safety_check: bool = False, + requires_submit: bool = True, + safety_rules: SafetyRules = default_image_safety_rules, + black_box_mode: bool = False, + state_error_limits: ( + Mapping[GENERATION_PROGRESS, int] | None + ) = HordeWorkerConfigDefaults.DEFAULT_STATE_ERROR_LIMITS, + strict_transition_mode: bool = HordeWorkerConfigDefaults.DEFAULT_GENERATION_STRICT_TRANSITION_MODE, + extra_logging: bool = False, + ) -> None: + """Initialize the generation. + + Args: + generation_parameters (SingleAlchemyParameters): The parameters for the generation. + generation_id (str | None): The unique identifier for the generation. \ + If None, a random UUID will be generated. + dispatch_result_ids (Sequence[str | uuid.UUID] | None): Result identifiers assigned by dispatch, if + available. + result_ids (list[ID_TYPES] | None): The unique identifiers for the generation. \ + If None, a random UUID will be generated for each result. + requires_generation (bool, optional): Whether the generation requires generation. \ + Defaults to False. + requires_post_processing (bool, optional): Whether the generation requires post-processing. \ + Defaults to True. + requires_safety_check (bool, optional): Whether the generation requires a safety check. \ + Defaults to False. + requires_submit (bool, optional): Whether the generation requires submission. \ + Defaults to True. + safety_rules (SafetyRules, optional): The safety rules to apply to the generation. \ + Defaults to default_image_safety_rules from `horde_sdk.safety`. + black_box_mode (bool, optional): Whether the generation is in black box mode. \ + This removes all of the intermediate states between starting and finished states. \ + This should only be used when the backend has no observability into the generation process. \ + Defaults to False. 
+ state_error_limits (Mapping[GENERATION_PROGRESS, int], optional): The maximum number of times a \ + generation can be in an error state before it is considered failed. \ + Defaults to HordeWorkerConfigDefaults.DEFAULT_STATE_ERROR_LIMITS. + strict_transition_mode (bool, optional): Whether or not to enforce strict transition checking. \ + This prevents setting the same state multiple times in a row. \ + Defaults to True. + extra_logging (bool, optional): Whether or not to enable extra debug-level logging, \ + especially for state transitions. \ + Defaults to True. + """ + generate_progress_transitions = self.default_generate_progress_transitions() + + if not requires_safety_check and not requires_submit: + generate_progress_transitions = self.default_generate_progress_transitions_no_submit() + elif requires_safety_check and not requires_submit: + generate_progress_transitions = base_generate_progress_no_submit_transitions + elif requires_safety_check and requires_submit: + generate_progress_transitions = base_generate_progress_transitions + + super().__init__( + generation_parameters=generation_parameters, + result_type=bytes, + generation_id=generation_id, + dispatch_result_ids=dispatch_result_ids, + result_ids=result_ids, + requires_generation=requires_generation, + requires_post_processing=requires_post_processing, + requires_safety_check=requires_safety_check, + requires_submit=requires_submit, + safety_rules=safety_rules, + state_error_limits=state_error_limits, + generate_progress_transitions=generate_progress_transitions, + black_box_mode=black_box_mode, + strict_transition_mode=strict_transition_mode, + extra_logging=extra_logging, + ) + + @classmethod + def from_template( + cls, + template: SingleAlchemyParametersTemplate, + *, + source_image: bytes | str | None = None, + default_form: str | None = None, + generation_id: ID_TYPES | None = None, + dispatch_result_ids: Sequence[ID_TYPES] | None = None, + result_id: ID_TYPES | None = None, + allocator: ResultIdAllocator | None = None, + seed: str = "alchemy", + init_kwargs: AlchemyGenerationInitKwargs | None = None, + ) -> AlchemySingleGeneration: + """Instantiate an alchemy generation from a template.""" + generation_parameters = template.to_parameters( + result_id=result_id, + source_image=source_image, + default_form=default_form, + allocator=allocator, + seed=seed, + ) + resolved_kwargs: AlchemyGenerationInitKwargs = {} + if init_kwargs: + resolved_kwargs.update(init_kwargs) + if generation_id is not None: + resolved_kwargs.setdefault("generation_id", _stringify_id(generation_id)) + else: + resolved_kwargs.setdefault("generation_id", _stringify_id(generation_parameters.result_id)) + if dispatch_result_ids is not None: + resolved_kwargs.setdefault( + "dispatch_result_ids", + [str(identifier) for identifier in dispatch_result_ids], + ) + resolved_kwargs.setdefault("result_ids", [generation_parameters.result_id]) + return cls( + generation_parameters=generation_parameters, + **resolved_kwargs, + ) + + +class TextSingleGeneration(HordeSingleGeneration[str]): + """A single text generation. + + **Not to be confused with a job**, which can contain multiple generations. 
+ """ + + @override + @classmethod + def default_generate_progress_transitions(cls) -> dict[GENERATION_PROGRESS, list[GENERATION_PROGRESS]]: + return default_text_generate_progress_transitions + + @override + @classmethod + def default_generate_progress_transitions_no_submit( + cls, + ) -> dict[GENERATION_PROGRESS, list[GENERATION_PROGRESS]]: + return default_text_generate_progress_no_submit_transitions + + @override + @classmethod + def does_class_require_generation(cls) -> bool: + return True + + @override + @classmethod + def does_class_require_safety_check(cls) -> bool: + return False + + generation_parameters: TextGenerationParameters + """The parameters for the generation.""" + + def __init__( + self, + *, + generation_parameters: TextGenerationParameters, + generation_id: ID_TYPES | None = None, + dispatch_result_ids: Sequence[ID_TYPES] | None = None, + result_ids: list[ID_TYPES] | None = None, + requires_generation: bool = True, + requires_post_processing: bool = False, + requires_safety_check: bool = False, + requires_submit: bool = True, + safety_rules: SafetyRules = default_text_safety_rules, + black_box_mode: bool = False, + state_error_limits: ( + Mapping[GENERATION_PROGRESS, int] | None + ) = HordeWorkerConfigDefaults.DEFAULT_STATE_ERROR_LIMITS, + strict_transition_mode: bool = HordeWorkerConfigDefaults.DEFAULT_GENERATION_STRICT_TRANSITION_MODE, + extra_logging: bool = False, + ) -> None: + """Initialize the generation. + + Args: + generation_parameters (TextGenerationParameters): The parameters for the generation. + generation_id (str | None): The unique identifier for the generation. \ + If None, a random UUID will be generated. + dispatch_result_ids (Sequence[str | uuid.UUID] | None): Result identifiers assigned by dispatch, if + available. + result_ids (list[ID_TYPES] | None): The unique identifiers for the generation. \ + If None, a random UUID will be generated for each result. + requires_generation (bool, optional): Whether the generation requires generation. \ + Defaults to True. + requires_post_processing (bool, optional): Whether the generation requires post-processing. \ + Defaults to False. + requires_safety_check (bool, optional): Whether the generation requires a safety check. \ + Defaults to False. + requires_submit (bool, optional): Whether the generation requires submission. \ + Defaults to True. + safety_rules (SafetyRules, optional): The safety rules to apply to the generation. \ + Defaults to default_text_safety_rules from `horde_sdk.safety`. + black_box_mode (bool, optional): Whether the generation is in black box mode. \ + This removes all of the intermediate states between starting and finished states. \ + This should only be used when the backend has no observability into the generation process. \ + Defaults to False. + state_error_limits (Mapping[GENERATION_PROGRESS, int], optional): The maximum number of times a \ + generation can be in an error state before it is considered failed. \ + Defaults to HordeWorkerConfigDefaults.DEFAULT_STATE_ERROR_LIMITS. + strict_transition_mode (bool, optional): Whether or not to enforce strict transition checking. \ + This prevents setting the same state multiple times in a row. \ + Defaults to True. + extra_logging (bool, optional): Whether or not to enable extra debug-level logging, \ + especially for state transitions. \ + Defaults to True. 
+ """ + if requires_generation is False: + raise ValueError("requires_generation must be True for TextSingleGeneration") + + generate_progress_transitions = self.default_generate_progress_transitions() + + if not requires_safety_check and not requires_submit: + generate_progress_transitions = self.default_generate_progress_transitions_no_submit() + if requires_safety_check and requires_submit: + generate_progress_transitions = base_generate_progress_transitions + elif requires_safety_check and not requires_submit: + generate_progress_transitions = base_generate_progress_no_submit_transitions + + super().__init__( + result_type=str, + generation_parameters=generation_parameters, + generation_id=generation_id, + dispatch_result_ids=dispatch_result_ids, + result_ids=result_ids, + requires_generation=True, + requires_post_processing=requires_post_processing, + requires_safety_check=requires_safety_check, + requires_submit=requires_submit, + safety_rules=safety_rules, + state_error_limits=state_error_limits, + generate_progress_transitions=generate_progress_transitions, + black_box_mode=black_box_mode, + strict_transition_mode=strict_transition_mode, + extra_logging=extra_logging, + ) + + @classmethod + def from_template( + cls, + template: TextGenerationParametersTemplate, + *, + generation_id: ID_TYPES | None = None, + dispatch_result_ids: Sequence[ID_TYPES] | None = None, + base_param_updates: BasicTextGenerationParametersTemplate | None = None, + result_ids: Sequence[ID_TYPES] | None = None, + allocator: ResultIdAllocator | None = None, + seed: str = "text", + init_kwargs: TextGenerationInitKwargs | None = None, + ) -> TextSingleGeneration: + """Instantiate a text generation from a template.""" + generation_parameters = template.to_parameters( + base_param_updates=base_param_updates, + result_ids=result_ids, + allocator=allocator, + seed=seed, + ) + resolved_kwargs: TextGenerationInitKwargs = {} + if init_kwargs: + resolved_kwargs.update(init_kwargs) + if generation_id is not None: + resolved_kwargs.setdefault("generation_id", generation_id) + if dispatch_result_ids is not None: + resolved_kwargs.setdefault("dispatch_result_ids", list(dispatch_result_ids)) + resolved_kwargs.setdefault("result_ids", generation_parameters.result_ids) + return cls( + generation_parameters=generation_parameters, + **resolved_kwargs, + ) diff --git a/horde_sdk/worker/generations_base.py b/horde_sdk/worker/generations_base.py new file mode 100644 index 00000000..0afef4e0 --- /dev/null +++ b/horde_sdk/worker/generations_base.py @@ -0,0 +1,948 @@ +from __future__ import annotations + +import threading +import time +import uuid +from abc import ABC +from collections import OrderedDict +from collections.abc import Callable, Collection, Iterable, Mapping, Sequence +from enum import auto +from typing import TypeVar + +from loguru import logger +from strenum import StrEnum + +from horde_sdk.consts import ID_TYPES +from horde_sdk.generation_parameters.generic import CompositeParametersBase +from horde_sdk.safety import SafetyResult, SafetyRules, default_safety_rules +from horde_sdk.worker.consts import ( + GENERATION_PROGRESS, + HordeWorkerConfigDefaults, + base_generate_progress_no_submit_transitions, + base_generate_progress_transitions, + black_box_generate_progress_transitions, + validate_generation_progress_transitions, +) +from horde_sdk.worker.exceptions import GenerationStateErrorLimitExceeded + +GenerationResultTypeVar = TypeVar("GenerationResultTypeVar") + + +class InputCollectionConstraint(StrEnum): + """Types 
of constraints for inputs collections.""" + + NONE = auto() + """No constraints.""" + single_only = auto() + """Single input only.""" + multiple_only = auto() + """Multiple inputs only.""" + single_or_multiple = auto() + """Single or multiple inputs allowed.""" + + +class InputConstraintType(StrEnum): + """Types of constraints for inputs.""" + + IMAGE = auto() + """Image input only.""" + TEXT = auto() + """Text input only.""" + AUDIO = auto() + """Audio input only.""" + VIDEO = auto() + """Video input only.""" + SPECIFIC_CLASS_TYPE = auto() + """Specific class type input only.""" + + +class HordeSingleGeneration[GenerationResultTypeVar](ABC): + """Base class for tracking a generation. Generations are specific instances of inference or computation. + + This should not be confused with specific results, which are the output of a generation. For example, a generation + could result in several images, but as the result of a single round of inference. The generation is the process of + generating the images, while the images are the result of that generation. + + See `GENERATION_PROGRESS` for the possible states a generation can be in. + """ + + _generate_progress_transitions: Mapping[GENERATION_PROGRESS, Iterable[GENERATION_PROGRESS]] + _state_error_limits: Mapping[GENERATION_PROGRESS, int] + + @classmethod + def default_generate_progress_transitions(cls) -> Mapping[GENERATION_PROGRESS, Iterable[GENERATION_PROGRESS]]: + """Get the default generation progress transitions.""" + return base_generate_progress_transitions + + @classmethod + def default_generate_progress_transitions_no_submit( + cls, + ) -> Mapping[GENERATION_PROGRESS, Iterable[GENERATION_PROGRESS]]: + """Get the default generation progress transitions without submission.""" + return base_generate_progress_no_submit_transitions + + @classmethod + def default_interrupt_states(cls) -> set[GENERATION_PROGRESS]: + """Get the default interrupt states.""" + return {GENERATION_PROGRESS.ABORTED, GENERATION_PROGRESS.USER_REQUESTED_ABORT, GENERATION_PROGRESS.ABANDONED} + + @classmethod + def does_class_require_generation(cls) -> bool: + """Whether or not the generation steps are mandatory for this generation class.""" + return True + + @classmethod + def does_class_require_safety_check(cls) -> bool: + """Whether or not the generation requires a safety check.""" + return True + + _extra_logging: bool = True + + _result_type: type[GenerationResultTypeVar] + + @property + def result_type(self) -> type[GenerationResultTypeVar]: + """The type of the result of the generation.""" + return self._result_type + + _batch_size: int = 1 + """The batch size of the generation.""" + + @property + def batch_size(self) -> int: + """The batch size of the generation.""" + return self._batch_size + + _result_ids: list[ID_TYPES] + + @property + def result_ids(self) -> list[ID_TYPES]: + """The unique identifiers for the generations in the batch.""" + with self._lock: + return self._result_ids.copy() + + _lock: threading.RLock + + _strict_transition_mode: bool + + _black_box_mode: bool + + @property + def black_box_mode(self) -> bool: + """Whether or not the generation is in black box mode.""" + return self._black_box_mode + + _registered_callbacks: dict[GENERATION_PROGRESS, list[Callable[[], None]]] + + def register_callback( + self, + state: GENERATION_PROGRESS, + callback: Callable[[], None], + ) -> None: + """Register a callback to be called when the generation state is updated. + + Args: + state (GENERATION_PROGRESS): The state to register the callback for. 
+ callback (Callable[[], None]): The callback to call when the state is updated. + """ + with self._lock: + self._registered_callbacks[state].append(callback) + + _dispatch_result_ids: list[ID_TYPES] | None + + def __init__( + self, + *, + generation_parameters: CompositeParametersBase, + result_type: type[GenerationResultTypeVar] | None = None, + generation_id: ID_TYPES | None = None, + dispatch_result_ids: Sequence[ID_TYPES] | None = None, + result_ids: list[ID_TYPES] | None = None, + requires_generation: bool = True, + requires_post_processing: bool = False, + requires_safety_check: bool = True, + requires_submit: bool = True, + safety_rules: SafetyRules = default_safety_rules, + state_error_limits: ( + Mapping[GENERATION_PROGRESS, int] | None + ) = HordeWorkerConfigDefaults.DEFAULT_STATE_ERROR_LIMITS, + generate_progress_transitions: Mapping[ + GENERATION_PROGRESS, + Iterable[GENERATION_PROGRESS], + ] = base_generate_progress_transitions, + black_box_mode: bool = False, + strict_transition_mode: bool = True, + extra_logging: bool = True, + ) -> None: + """Initialize the generation. + + Args: + generation_parameters (ComposedParameterSetBase): The parameters for the generation. + result_type (type): The type of the result of the generation. + generation_id (str | None): The unique identifier for the generation. \ + If None, a new UUID will be generated. + dispatch_result_ids (Sequence[str | uuid.UUID] | None): Identifiers provided by the dispatch system for + each result slot. Defaults to None when the generation has no remote counterpart yet. + result_ids (list[ID_TYPES] | None): The unique identifiers for the results of the generation. + If None, a new UUID will be generated for each generation in the batch. + requires_generation (bool, optional): Whether or not the generation requires generation (inference, etc). \ + Defaults to True. + requires_post_processing (bool, optional): Whether or not the generation requires post-processing. \ + Defaults to False. + requires_safety_check (bool, optional): Whether or not the generation requires a safety check. \ + Defaults to True. + requires_submit (bool, optional): Whether or not the generation requires submission. \ + Defaults to True. + safety_rules (SafetyRules, optional): The rules to use for safety checking. \ + Defaults to `default_safety_rules` from `horde_sdk.safety`. + state_error_limits (Mapping[GENERATION_PROGRESS, int], optional): The maximum number of times a \ + generation can be in an error state before it is considered failed. \ + Defaults to None. + generate_progress_transitions (dict[GenerationProgress, list[GenerationProgress]], optional): The \ + transitions that are allowed between generation states. \ + Defaults to `base_generate_progress_transitions` (found in consts.py). + black_box_mode (bool, optional): Whether the generation is in black box mode. \ + This removes all of the intermediate states between starting and finished states. \ + This should only be used when the backend has no observability into the generation process. \ + Defaults to False. + strict_transition_mode (bool, optional): Whether or not to enforce strict transition checking. \ + This prevents setting the same state multiple times in a row. \ + Defaults to True. + extra_logging (bool, optional): Whether or not to enable extra debug-level logging, \ + especially for state transitions. \ + Defaults to True. + + Raises: + ValueError: If result_type is None. + ValueError: If the result type is not iterable but the batch size is greater than 1. 
+ ValueError: If result_ids is not None and its length does not match the batch size. + ValueError: If generate_progress_transitions is None. + ValueError: If the generation class requires generation but requires_generation is False. + ValueError: If the generation class requires a safety check but requires_safety_check is False. + """ + if result_type is None: + raise ValueError("result_type cannot be None") + + if generation_id is None: + logger.debug("Generation ID is None, creating a new one.") + generation_id = uuid.uuid4() + + self.generation_id = generation_id + self._dispatch_result_ids = list(dispatch_result_ids) if dispatch_result_ids is not None else None + self.generation_parameters = generation_parameters + + self._result_type = result_type + self._batch_size = generation_parameters.get_number_expected_results() + + if self._batch_size > 1 and not issubclass( + self._result_type, + Iterable, + ): + raise ValueError( + f"Result type {self._result_type} is not iterable, but batch size is {self._batch_size}", + ) + if result_ids is not None and len(result_ids) != self._batch_size: + raise ValueError( + f"Batch IDs length {len(result_ids)} does not match batch size {self._batch_size}", + ) + + if result_ids is None: + logger.trace("Batch IDs are None, creating new ones.") + result_ids = [uuid.uuid4() for _ in range(self._batch_size)] + + self._result_ids = result_ids + + self._generation_results: OrderedDict[ID_TYPES, GenerationResultTypeVar | None] = OrderedDict() + + self._extra_logging = extra_logging + + if self.does_class_require_generation() and not requires_generation: + raise ValueError("Generation class requires generation but requires_generation is False") + + self._requires_generation = requires_generation + self._requires_post_processing = requires_post_processing + + if self.does_class_require_safety_check() and not requires_safety_check: + raise ValueError("Generation class requires safety check but requires_safety_check is False") + + self._requires_safety_check = requires_safety_check + self._safety_results: list[SafetyResult | None] = [None] * self._batch_size + self._safety_rules = safety_rules + + self._requires_submit = requires_submit + + if generate_progress_transitions is None and not black_box_mode: + raise ValueError("generate_progress_transitions cannot be None") + + self._black_box_mode = black_box_mode + + if black_box_mode: + if generate_progress_transitions != base_generate_progress_transitions: + logger.trace( + "Black box mode is enabled, overriding generate_progress_transitions with " + "black_box_generate_progress_transitions.", + ) + self._generate_progress_transitions = black_box_generate_progress_transitions + else: + if not validate_generation_progress_transitions(generate_progress_transitions): + raise ValueError( + "Invalid generate_progress_transitions provided. " + "Please ensure it is a valid mapping of GENERATION_PROGRESS to iterable of GENERATION_PROGRESS." + "See horde_sdk.worker.consts for the default transitions.", + ) + self._generate_progress_transitions = generate_progress_transitions + + self._errored_states = [] + self._error_counts = {} + + self._state_error_limits = state_error_limits or {} + self._generation_failed_messages = [] + self._generation_failure_exceptions = [] + + # This initialization is critical. The first state must be NOT_STARTED and ERROR must not be the next state. + # Errors are only allowed after the first "action" state where something is done. 
+ self._progress_history = [(GENERATION_PROGRESS.NOT_STARTED, time.time())] + + self._lock = threading.RLock() + + self._strict_transition_mode = strict_transition_mode + + self._registered_callbacks = {} + + for state in self._generate_progress_transitions: + self._registered_callbacks[state] = [] + + generation_id: ID_TYPES + """Unique identifier for the generation.""" + + @property + def dispatch_result_ids(self) -> list[ID_TYPES] | None: + """Identifiers supplied by the dispatch source, if any.""" + with self._lock: + if self._dispatch_result_ids is None: + return None + return self._dispatch_result_ids.copy() + + def set_dispatch_result_ids(self, dispatch_result_ids: Sequence[ID_TYPES] | None) -> None: + """Bind the generation to the result identifiers supplied by dispatch.""" + with self._lock: + self._dispatch_result_ids = list(dispatch_result_ids) if dispatch_result_ids is not None else None + + @property + def short_id(self) -> str: + """Get a short identifier for the generation.""" + return str(self.generation_id)[:8] + + generation_parameters: CompositeParametersBase + """The parameters for the generation.""" + + _generation_failure_count: int = 0 + """The number of times the generation has failed.""" + + @property + def generation_failure_count(self) -> int: + """The number of times the generation has failed during any step of the generation process.""" + with self._lock: + return self._generation_failure_count + + def get_generation_progress(self) -> GENERATION_PROGRESS: + """Return the state of the generation.""" + with self._lock: + return self._generation_progress + + _progress_history: list[tuple[GENERATION_PROGRESS, float]] + """A list of tuples containing all of the generation states and the time they were set.""" + + _errored_states: list[tuple[GENERATION_PROGRESS, float]] + _error_counts: dict[GENERATION_PROGRESS, int] + _any_error_count_exceeded: bool = False + + @property + def errored_states(self) -> list[tuple[GENERATION_PROGRESS, float]]: + """Return a tuple of states which occurred just before an error state and the time they were set.""" + with self._lock: + return self._errored_states.copy() + + @property + def error_counts(self) -> dict[GENERATION_PROGRESS, int]: + """Return a dictionary of states and the number of times they occurred before an error state.""" + with self._lock: + return self._error_counts.copy() + + def get_progress_history(self) -> list[tuple[GENERATION_PROGRESS, float]]: + """Get the generation progress history.""" + with self._lock: + return self._progress_history.copy() + + _generation_progress: GENERATION_PROGRESS = GENERATION_PROGRESS.NOT_STARTED + + _generation_failed_messages: list[str] + """The reasons the generation failed.""" + _generation_failure_exceptions: list[Exception] + """The exceptions that caused the generation to fail.""" + + _requires_post_processing: bool = False + + @property + def requires_post_processing(self) -> bool: + """Whether or not the generation requires post-processing.""" + return self._requires_post_processing + + _requires_generation: bool = False + + @property + def requires_generation(self) -> bool: + """Whether or not the generation requires generation.""" + return self._requires_generation + + _requires_safety_check: bool = False + + @property + def requires_safety_check(self) -> bool: + """Whether or not the generation requires a safety check.""" + return self._requires_safety_check + + _requires_submit: bool = False + """Whether or not the generation requires submission.""" + + @property + def 
requires_submit(self) -> bool: + """Whether or not the generation requires submission.""" + return self._requires_submit + + def _extra_log(self) -> Callable[[str], None]: + """Log a message at debug level if extra logging is enabled or trace level if it is not.""" + if self._extra_logging: + return logger.debug + + return logger.trace + + def _add_failure_message(self, message: str, exception: Exception | None = None) -> None: + """Add a message to the list of reasons the generation failed.""" + with self._lock: + self._generation_failed_messages.append(message) + + if exception is not None: + if self._generation_failure_exceptions is None: + self._generation_failure_exceptions = [] + self._generation_failure_exceptions.append(exception) + + def get_last_non_error_state(self) -> GENERATION_PROGRESS: + """Get the last non-error state.""" + with self._lock: + for state, _ in reversed(self._progress_history): + if state != GENERATION_PROGRESS.ERROR: + return state + raise RuntimeError("No non-error state found in progress history") + + def get_last_non_error_state_and_time(self) -> tuple[GENERATION_PROGRESS, float]: + """Get the last non-error state and the time it was set.""" + with self._lock: + for state, time_set in reversed(self._progress_history): + if state != GENERATION_PROGRESS.ERROR: + return state, time_set + raise RuntimeError("No non-error state found in progress history") + + def is_next_state_valid( + self, + next_state: GENERATION_PROGRESS, + ) -> bool: + """Check if the next state is valid.""" + with self._lock: + if next_state in self.default_interrupt_states(): + return True + + current_state = self._generation_progress + + if self._strict_transition_mode: + if current_state == next_state: + return False + + if self._any_error_count_exceeded: + return False + + if current_state == GENERATION_PROGRESS.ERROR and next_state == GENERATION_PROGRESS.ERROR: + return False + + if current_state == GENERATION_PROGRESS.ERROR and len(self._progress_history) < 2: + return False + + state_error_count = self._error_counts.get(next_state, 0) + state_error_limit = ( + self._state_error_limits.get(next_state, float("inf")) if self._state_error_limits else float("inf") + ) + error_count_exceeded = state_error_count >= state_error_limit + + if error_count_exceeded: + return False + + return True + + def _set_generation_progress( + self, + next_state: GENERATION_PROGRESS, + *, + failed_message: str | None = None, + failure_exception: Exception | None = None, + ) -> GENERATION_PROGRESS: + """Set the generation progress to the next state. + + Args: + next_state (GENERATION_PROGRESS): The state to transition to. + failed_message (str, optional): The reason the generation failed at this step. \ + Use this for errors that make the current step impossible to complete without intervention. + For example, if a sub-process is OOM killed. + + Use `add_metadata_entry` for non-critical errors. + + Defaults to None. + + failure_exception (Exception, optional): The exception that caused the generation to fail at this step. \ + Defaults to None. + + Returns: + GENERATION_PROGRESS: The new state of the generation. + + Raises: + ValueError: If the transition is invalid. + RuntimeError: If error limits have been exceeded or other state transition constraints are violated. 
+ """ + with self._lock: + current_state = self._generation_progress + + # Handle failure messages + if next_state == GENERATION_PROGRESS.ABORTED and failed_message is None: + logger.error("Faulted reason should be set when transitioning to FAULTED") + + if failed_message is not None: + self._add_failure_message(failed_message, failure_exception) + + # Check if state is unchanged + if current_state == next_state: + if self._strict_transition_mode: + raise ValueError(f"Generation {self.generation_id} is already in state {current_state}") + logger.debug( + f"Generation {self.generation_id} is already in state {current_state}, " + f"not transitioning to {next_state}", + ) + return current_state + + transition_log_message = self._build_transition_log_message( + current_state, + next_state, + failed_message, + failure_exception, + ) + self._extra_log()(transition_log_message) + + if current_state == GENERATION_PROGRESS.ERROR and len(self._progress_history) < 2: + raise RuntimeError("Cannot transition from error state without a history!") + + last_non_error_state, last_non_error_state_time = self._get_last_non_error_state(current_state) + + if self._any_error_count_exceeded: + if self._strict_transition_mode: + if next_state not in self._state_error_limits: + raise RuntimeError( + f"Generation {self.generation_id} has exceeded the maximum number of errors " + f"but there is no limit set for {next_state}. This should be impossible.", + ) + raise GenerationStateErrorLimitExceeded( + generation_id=self.generation_id, + error_limit=self._state_error_limits[next_state], + last_non_error_state=last_non_error_state, + ) + logger.debug( + f"Generation {self.generation_id} has exceeded the maximum number of errors " + f"but is transitioning to {next_state}", + ) + + if next_state == GENERATION_PROGRESS.ERROR: + self._update_error_tracking(last_non_error_state, last_non_error_state_time) + elif current_state == GENERATION_PROGRESS.ERROR: + self._extra_log()(f"Generation {self.generation_id} last non-error state was {last_non_error_state}") + else: + self._validate_normal_transition(next_state, last_non_error_state) + + if self._check_error_limit_exceeded(next_state): + self._any_error_count_exceeded = True + raise RuntimeError( + f"Generation {self.generation_id} has exceeded the maximum number of errors " + f"for state {next_state}", + ) + + self._extra_log()(f"Generation {self.generation_id} progress history: {self._progress_history}") + self._generation_progress = next_state + self._progress_history.append((next_state, time.monotonic())) + + return next_state + + def _build_transition_log_message( + self, + current_state: GENERATION_PROGRESS, + next_state: GENERATION_PROGRESS, + failed_message: str | None, + failure_exception: Exception | None, + ) -> str: + """Build a log message describing the state transition.""" + message = f"Attempting transitioning generation {self.generation_id} from {current_state} to {next_state}. " + if failed_message is not None: + message += f"Reason: {failed_message}. " + if failure_exception is not None: + message += f"Exception: {failure_exception}. 
" + return message + + def _get_last_non_error_state(self, current_state: GENERATION_PROGRESS) -> tuple[GENERATION_PROGRESS, float]: + """Get the relevant previous state for transition logic.""" + for state, time_set in reversed(self._progress_history): + if state != GENERATION_PROGRESS.ERROR: + return state, time_set + + return current_state, time.monotonic() + + def _update_error_tracking(self, last_state: GENERATION_PROGRESS, last_state_time: float) -> None: + """Update error tracking when transitioning to an error state.""" + self._errored_states.append((last_state, last_state_time)) + self._error_counts[last_state] = self._error_counts.get(last_state, 0) + 1 + + def _validate_normal_transition(self, next_state: GENERATION_PROGRESS, last_state: GENERATION_PROGRESS) -> None: + """Validate a normal (non-error) state transition.""" + if next_state in self.default_interrupt_states(): + self._extra_log()(f"Generation {self.generation_id} is being interrupted by {next_state}") + elif next_state not in self._generate_progress_transitions[last_state]: + self._extra_log()( + f"{self._progress_history=}, {self._generation_progress=}, {next_state=}, " + f"{self._generate_progress_transitions=}", + ) + raise ValueError(f"Invalid transition from {last_state} to {next_state}") + elif next_state == last_state: + self._extra_log()(f"Retrying state {next_state} after error") + + def _check_error_limit_exceeded(self, state: GENERATION_PROGRESS) -> bool: + """Check if the error limit for a state has been exceeded.""" + if self._state_error_limits is None: + return False + + state_error_count = self._error_counts.get(state, 0) + state_error_limit = self._state_error_limits.get(state, float("inf")) + return state_error_count >= state_error_limit + + def on_error(self, *, failed_message: str, failure_exception: Exception | None = None) -> GENERATION_PROGRESS: + """Call when an error occurs at any point in the generation process, safety checks, or submission. + + This should be reserved for errors which make the current step **impossible** to complete. For example, if the + a sub-process is OOM killed. + + Contrast with the `add_metadata_entry` method, which is used for non-critical errors. If there is no + METADATA_TYPE for the error you encountered, you can use `failed_message` and `failure_exception` instead. + + Args: + failed_message (str): The reason the generation failed. + failure_exception (Exception, optional): The exception that caused the generation to fail. \ + Defaults to None. + + Returns: + GENERATION_PROGRESS: The new state of the generation, which will be set to `GENERATION_PROGRESS.ERROR`. + + Raises: + RuntimeError: If the generation has exceeded the maximum number of errors for the current state. + RuntimeError: If the generation is in an error state and has no previous state to transition from. + """ + with self._lock: + self._generation_failure_count += 1 + return self._set_generation_progress( + GENERATION_PROGRESS.ERROR, + failed_message=failed_message, + failure_exception=failure_exception, + ) + + def on_abort(self, *, failed_message: str, failure_exception: Exception | None = None) -> GENERATION_PROGRESS: + """Call when the generation is aborted. + + Args: + failed_message (str): The reason the generation was aborted. + failure_exception (Exception, optional): The exception that caused the generation to be aborted. \ + Defaults to None. + + Returns: + GENERATION_PROGRESS: The new state of the generation, which will be set to `GENERATION_PROGRESS.ABORTED`. 
+ + Raises: + RuntimeError: If the generation has exceeded the maximum number of errors for the current state. + RuntimeError: If the generation is in an error state and has no previous state to transition from. + + """ + return self._set_generation_progress( + GENERATION_PROGRESS.ABORTED, + failed_message=failed_message, + failure_exception=failure_exception, + ) + + def on_preloading(self) -> GENERATION_PROGRESS: + """Call when preloading is about to begin.""" + return self._set_generation_progress(GENERATION_PROGRESS.PRELOADING) + + def on_preloading_complete(self) -> GENERATION_PROGRESS: + """Call after preloading is complete.""" + return self._set_generation_progress(GENERATION_PROGRESS.PRELOADING_COMPLETE) + + def _work_complete(self) -> GENERATION_PROGRESS: + if self._black_box_mode: + return self._generation_progress + + if self._requires_safety_check: + return self._set_generation_progress(GENERATION_PROGRESS.PENDING_SAFETY_CHECK) + + if self._requires_submit: + return self._set_generation_progress(GENERATION_PROGRESS.PENDING_SUBMIT) + + return self._set_generation_progress(GENERATION_PROGRESS.COMPLETE) + + def on_generation_work_complete( + self, + result: GenerationResultTypeVar | Collection[GenerationResultTypeVar] | None = None, + ) -> GENERATION_PROGRESS: + """Call when the generation work is complete, such as when inference is done. + + This does not mean the generation is finalized, as calling this function means that safety checks and + submission may still be pending. Examples of when this function would be called are when comfyui has + just returned an image, interrogating an image has just completed or when a text backend has just generated + text. + """ + if self.requires_post_processing and not self._black_box_mode: + return self._set_generation_progress(GENERATION_PROGRESS.PENDING_POST_PROCESSING) + + work_complete_progress = self._work_complete() + if result is not None: + self._set_generation_work_result(result) + return work_complete_progress + + def on_generating(self) -> GENERATION_PROGRESS: + """Call when the generation is about to begin.""" + return self._set_generation_progress(GENERATION_PROGRESS.GENERATING) + + def on_post_processing(self) -> GENERATION_PROGRESS: + """Call when post-processing is about to begin.""" + return self._set_generation_progress(GENERATION_PROGRESS.POST_PROCESSING) + + def on_post_processing_complete(self) -> GENERATION_PROGRESS: + """Call when post-processing is complete.""" + return self._work_complete() + + def on_pending_safety_check(self) -> GENERATION_PROGRESS: + """Call when the generation is pending safety check.""" + return self._set_generation_progress(GENERATION_PROGRESS.PENDING_SAFETY_CHECK) + + def set_work_result(self, result: GenerationResultTypeVar | Collection[GenerationResultTypeVar]) -> None: + """Set the result of the generation work. + + Args: + result (Any): The result of the generation work. + """ + self._set_generation_work_result(result) + + def on_complete(self) -> GENERATION_PROGRESS: + """Call when the generation is complete.""" + return self._set_generation_progress(GENERATION_PROGRESS.COMPLETE) + + _generation_results: OrderedDict[ID_TYPES, GenerationResultTypeVar | None] + + def on_state( + self, + state: GENERATION_PROGRESS, + ) -> GENERATION_PROGRESS: + """Call when the generation state is updated. + + Args: + state (GENERATION_PROGRESS): The new state of the generation. 
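+
+        Example:
+            A small sketch, assuming `generation` is an existing `HordeSingleGeneration`
+            instance whose transition table allows the requested state. `step()` is a thin
+            alias for this method:
+
+                generation.on_state(GENERATION_PROGRESS.GENERATING)  # equivalent to generation.on_generating()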
+ """ + match state: + case GENERATION_PROGRESS.PRELOADING: + return self.on_preloading() + case GENERATION_PROGRESS.PRELOADING_COMPLETE: + return self.on_preloading_complete() + case GENERATION_PROGRESS.GENERATING: + return self.on_generating() + case GENERATION_PROGRESS.POST_PROCESSING: + return self.on_post_processing() + case GENERATION_PROGRESS.PENDING_POST_PROCESSING: + return self.on_post_processing_complete() + case GENERATION_PROGRESS.PENDING_SAFETY_CHECK: + return self.on_pending_safety_check() + case GENERATION_PROGRESS.SAFETY_CHECKING: + return self.on_safety_checking() + case GENERATION_PROGRESS.PENDING_SUBMIT: + return self.on_pending_submit() + case GENERATION_PROGRESS.SUBMITTING: + return self.on_submitting() + case GENERATION_PROGRESS.SUBMIT_COMPLETE: + return self.on_submit_complete() + case GENERATION_PROGRESS.COMPLETE: + return self.on_complete() + case _: + return self._set_generation_progress(state) + + def step(self, state: GENERATION_PROGRESS) -> GENERATION_PROGRESS: + """Call when the generation state is updated. + + Args: + state (GENERATION_PROGRESS): The new state of the generation. + """ + return self.on_state(state) + + @property + def generation_results(self) -> OrderedDict[ID_TYPES, GenerationResultTypeVar | None]: + """Get the result of the generation.""" + with self._lock: + return self._generation_results.copy() + + def _set_generation_work_result( + self, + result: GenerationResultTypeVar | Collection[GenerationResultTypeVar], + ) -> None: + """Set the result of the generation work. + + Args: + result (GenerationResultTypeVar): The result of the generation work. + """ + if (not isinstance(result, self._result_type)) and isinstance(result, Collection): + for item in result: + if not isinstance(item, self.result_type): + raise ValueError( + f"Result type {type(item)} does not match expected type {self.result_type}", + ) + + elif not isinstance(result, self.result_type): + raise ValueError( + f"Result type {type(result)} does not match expected type {self.result_type}", + ) + + with self._lock: + if len(self._generation_results) >= self.batch_size: + raise ValueError( + f"Generation result list is full ({len(self._generation_results)}), cannot add more results", + ) + + if isinstance(result, self._result_type): + self._generation_results[self._result_ids[len(self._generation_results)]] = result + + elif isinstance(result, Collection): + if len(result) + len(self._generation_results) > self.batch_size: + raise ValueError( + f"Result list is full ({len(self._generation_results)}), cannot add more results", + ) + if not all(isinstance(r, self.result_type) for r in result): + raise ValueError( + f"Result type {type(result)} does not match expected type {self.result_type}", + ) + + start = len(self._generation_results) + for index, passed_result in enumerate(result): + self._generation_results[self._result_ids[start + index]] = passed_result + + _safety_rules: SafetyRules + _safety_results: list[SafetyResult | None] + + def _set_safety_check_result( + self, + batch_index: int, + safety_result: SafetyResult, + ) -> None: + """Set the result of the safety check. + + Args: + batch_index (int): The index of the batch to set the safety check result for. + safety_result (SafetyResult): The result of the safety check. + + Raises: + ValueError: If is_nsfw or is_csam is not provided or is `None`. 
+        """
+        if len(self._generation_results) == 0 or len(self._generation_results) < batch_index + 1:
+            raise ValueError("Generation result must be set before setting safety check result")
+
+        with self._lock:
+            if self._safety_results[batch_index] is not None:
+                logger.warning(
+                    f"Safety check result for batch index {batch_index} has already been set.",
+                )
+
+            self._safety_results[batch_index] = safety_result
+
+            if self._safety_rules.should_censor(safety_result):
+                logger.trace(
+                    f"Safety check result for batch index {batch_index} is unsafe: {safety_result}. Censoring result.",
+                )
+
+                if self._generation_results[self._result_ids[batch_index]] is None:
+                    logger.warning(
+                        f"Generation result for batch index {batch_index} is None already",
+                    )
+
+                self._generation_results[self._result_ids[batch_index]] = None
+
+    def is_safety_checking_done_on_all_batch(self) -> bool:
+        """Check whether a safety check result has been recorded for every generation in the batch.
+
+        Returns:
+            bool: True if every generation in the batch has a safety check result, False otherwise.
+        """
+        with self._lock:
+            return all(result is not None for result in self._safety_results)
+
+    def get_safety_check_results(self) -> list[SafetyResult | None]:
+        """Get the results of the safety checks.
+
+        Returns:
+            list[SafetyResult | None]: The results of the safety checks for each batch.
+        """
+        with self._lock:
+            return self._safety_results.copy()
+
+    def on_safety_checking(self) -> GENERATION_PROGRESS:
+        """Call when the safety check is about to start."""
+        return self._set_generation_progress(GENERATION_PROGRESS.SAFETY_CHECKING)
+
+    def on_safety_check_complete(self, batch_index: int, safety_result: SafetyResult) -> GENERATION_PROGRESS:
+        """Call when the safety check is complete.
+
+        Args:
+            batch_index (int): The index of the batch to set the safety check result for.
+                This is 0-indexed and must match the position of the result_ids provided during initialization.
+            safety_result (SafetyResult): The result of the safety check.
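+
+        Example:
+            A sketch for a two-result batch. `first_result` and `second_result` are assumed to
+            be `SafetyResult` instances produced by the worker's safety checker, and the
+            generation results must already have been recorded (e.g. via
+            `on_generation_work_complete` or `set_work_result`):
+
+                generation.on_safety_checking()
+                generation.on_safety_check_complete(batch_index=0, safety_result=first_result)
+                generation.on_safety_check_complete(batch_index=1, safety_result=second_result)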
+ """ + self._set_safety_check_result( + batch_index=batch_index, + safety_result=safety_result, + ) + + if not self.is_safety_checking_done_on_all_batch(): + return GENERATION_PROGRESS.SAFETY_CHECKING + + if self._requires_submit: + return self._set_generation_progress(GENERATION_PROGRESS.PENDING_SUBMIT) + + return self._set_generation_progress(GENERATION_PROGRESS.COMPLETE) + + def on_pending_submit(self) -> GENERATION_PROGRESS: + """Call when the generation is pending submission.""" + return self._set_generation_progress(GENERATION_PROGRESS.PENDING_SUBMIT) + + def on_submitting(self) -> GENERATION_PROGRESS: + """Call when an attempt is going to be made to submit the generation.""" + return self._set_generation_progress(GENERATION_PROGRESS.SUBMITTING) + + def on_submit_complete(self) -> GENERATION_PROGRESS: + """Call when the generation has been successfully submitted.""" + return self._set_generation_progress(GENERATION_PROGRESS.SUBMIT_COMPLETE) + + def on_user_requested_abort(self) -> GENERATION_PROGRESS: + """Call when the user requests to abort the generation.""" + return self._set_generation_progress(GENERATION_PROGRESS.USER_REQUESTED_ABORT) + + def on_user_abort_complete(self) -> GENERATION_PROGRESS: + """Call when the user requested abort is complete.""" + return self._set_generation_progress(GENERATION_PROGRESS.USER_ABORT_COMPLETE) diff --git a/horde_sdk/worker/job_base.py b/horde_sdk/worker/job_base.py new file mode 100644 index 00000000..f6d22c5e --- /dev/null +++ b/horde_sdk/worker/job_base.py @@ -0,0 +1,343 @@ +from __future__ import annotations + +import threading +import time +import uuid +from abc import ABC, abstractmethod +from collections.abc import Sequence +from enum import auto +from typing import Any, TypeVar + +from loguru import logger +from pydantic import BaseModel, Field +from strenum import StrEnum + +from horde_sdk.consts import ID_TYPES, WORKER_TYPE +from horde_sdk.generation_parameters import CompositeParametersBase +from horde_sdk.worker.consts import ( + GENERATION_PROGRESS, + WORKER_ERRORS, + HordeWorkerConfigDefaults, + finalized_generation_states, +) +from horde_sdk.worker.generations_base import HordeSingleGeneration + +SingleGenerationTypeVar = TypeVar("SingleGenerationTypeVar", bound=HordeSingleGeneration[Any]) +ComposedParameterSetTypeVar = TypeVar("ComposedParameterSetTypeVar", bound=CompositeParametersBase) + + +class JOB_EXECUTION_MODE(StrEnum): + """How the job should be executed in a chain context.""" + + LOCAL_ONLY = auto() + """Execute job locally on the worker without submitting back to API.""" + SUBMIT_TO_API = auto() + """Submit job results back to API for distributed chain execution.""" + + +class HordeWorkerJobConfig(BaseModel): + """Configuration for a HordeWorkerJob.""" + + max_consecutive_failed_job_submits: int = Field( + default=HordeWorkerConfigDefaults.DEFAULT_MAX_CONSECUTIVE_FAILED_JOB_SUBMITS, + ge=1, + le=HordeWorkerConfigDefaults.UNREASONABLE_MAX_CONSECUTIVE_FAILED_JOB_SUBMITS, + ) + max_generation_failures: int = Field( + default=HordeWorkerConfigDefaults.DEFAULT_MAX_GENERATION_FAILURES, + ge=0, + le=HordeWorkerConfigDefaults.UNREASONABLE_MAX_GENERATION_FAILURES, + ) + + job_submit_retry_delay: float = Field( + default=HordeWorkerConfigDefaults.DEFAULT_JOB_SUBMIT_RETRY_DELAY, + ge=0, + ) + + state_error_limits: dict[GENERATION_PROGRESS, int] = Field( + default_factory=lambda: HordeWorkerConfigDefaults.DEFAULT_STATE_ERROR_LIMITS.copy(), + ) + + generation_strict_transition_mode: bool = Field( + 
default=HordeWorkerConfigDefaults.DEFAULT_GENERATION_STRICT_TRANSITION_MODE, + ) + + upload_timeout: float = Field( + default=HordeWorkerConfigDefaults.DEFAULT_UPLOAD_TIMEOUT, + ge=0, + ) + + max_retries: int = Field( + default=HordeWorkerConfigDefaults.DEFAULT_MAX_RETRIES, + ge=0, + ) + + retry_delay: float = Field( + default=HordeWorkerConfigDefaults.DEFAULT_RETRY_DELAY, + ge=0, + ) + + result_image_format: str = Field( + default=HordeWorkerConfigDefaults.DEFAULT_RESULT_IMAGE_FORMAT, + ) + + result_image_quality: int = Field( + default=HordeWorkerConfigDefaults.DEFAULT_RESULT_IMAGE_QUALITY, + ge=1, + le=100, + ) + + result_image_pil_method: int = Field( + default=HordeWorkerConfigDefaults.DEFAULT_RESULT_IMAGE_PIL_METHOD, + ge=0, + ) + + +class HordeWorkerJob[ + SingleGenerationTypeVar: HordeSingleGeneration[Any], + ComposedParameterSetTypeVar: CompositeParametersBase, +]( + ABC, +): + """Base class for all worker jobs. + + Jobs wrap an underlying generation parameter set with meta-information about the job, including + information sent along from the dispatch source. This includes the remote job ID, the time the job + was received, and any other higher-level information that is useful for the worker to know in order + to process the job. + + """ + + _local_job_id: ID_TYPES + _dispatch_job_id: ID_TYPES | None = None + _job_config: HordeWorkerJobConfig + + _consecutive_failed_job_submits: int = 0 + """The number of consecutive times the job has failed to submit to the API.""" + + _lock: threading.RLock = threading.RLock() + + def __init__( + self, + generation: SingleGenerationTypeVar, + generation_cls: type[SingleGenerationTypeVar], + job_id: ID_TYPES | None = None, + *, + dispatch_job_id: ID_TYPES | None = None, + dispatch_result_ids: Sequence[ID_TYPES] | None = None, + job_config: HordeWorkerJobConfig | None = None, + time_received: float | None = None, + preserve_generation_id: bool = False, + ) -> None: + """Initialize the job. + + Args: + generation (SingleGenerationType): The generation to use for the job. + generation_cls (type[SingleGenerationType]): The class to use for the generations in the job. + job_id (ID_TYPES | None): The unique identifier for the job. If `None`, a new UUID will be generated. + dispatch_job_id (ID_TYPES | None): Identifier supplied by the dispatch source. Defaults to None when + the job has not been announced remotely. + dispatch_result_ids (Sequence[ID_TYPES] | None): Result identifiers supplied by dispatch for the attached + generation, if available. Defaults to None. + job_config (HordeWorkerJobConfig, optional): The configuration for the job. If `None`, the default \ + configuration will be used. Defaults to None. + time_received (float | None): The time the job was received. If `None`, the time the response model was \ + constructed will be used. Defaults to None. + preserve_generation_id (bool): When True, retain the generation's existing identifier instead of + rebinding it to the job identifier. Defaults to False. 
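+
+        Example:
+            A sketch of how dispatch identifiers are bound, using a hypothetical concrete
+            subclass `MyWorkerJob` (this base class is abstract) and an existing `generation`
+            that has no dispatch result ids yet:
+
+                job = MyWorkerJob(
+                    generation=generation,
+                    generation_cls=type(generation),
+                    dispatch_job_id="remote-job-id",
+                )
+                # Without explicit dispatch_result_ids, the dispatch job id is reused as the
+                # single dispatch result id on the attached generation.
+                assert job.generation.dispatch_result_ids == ["remote-job-id"]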
+ """ + if job_config is None: + job_config = HordeWorkerJobConfig() + self._job_config = job_config + + self._generation = generation + self._generation_cls = generation_cls + + effective_job_id = job_id + if preserve_generation_id: + if effective_job_id is None: + effective_job_id = generation.generation_id + if effective_job_id is None: + effective_job_id = uuid.uuid4() + else: + if effective_job_id is None: + effective_job_id = uuid.uuid4() + self._generation.generation_id = effective_job_id + + self._local_job_id = effective_job_id + self._dispatch_job_id = dispatch_job_id + + if dispatch_result_ids is not None: + self._generation.set_dispatch_result_ids(dispatch_result_ids) + elif dispatch_job_id is not None and not self._generation.dispatch_result_ids: + self._generation.set_dispatch_result_ids([dispatch_job_id]) + + if time_received is not None: + self._time_received = time_received + + self._lock = threading.RLock() + + _generation_cls: type[SingleGenerationTypeVar] + + @property + def generation_cls(self) -> type[SingleGenerationTypeVar]: + """The (python) type created by the job.""" + return self._generation_cls + + _generation: SingleGenerationTypeVar + + _generation_parameters_cls: type[ComposedParameterSetTypeVar] + + @property + def generation_parameters_cls(self) -> type[ComposedParameterSetTypeVar]: + """The (python) type of the generation parameters.""" + return self._generation_parameters_cls + + @property + def generation(self) -> SingleGenerationTypeVar: + """The individual generations in this job.""" + return self._generation + + @property + def job_config(self) -> HordeWorkerJobConfig: + """Return the configuration associated with this job.""" + return self._job_config + + @property + def job_id(self) -> ID_TYPES: + """Return the identifier assigned to this job.""" + return self._local_job_id + + @property + def local_job_id(self) -> ID_TYPES: + """Alias for :meth:`job_id` to emphasize local scope.""" + return self._local_job_id + + @property + def dispatch_job_id(self) -> ID_TYPES | None: + """Return the identifier provided by the dispatch source, if any.""" + with self._lock: + return self._dispatch_job_id + + def set_dispatch_job_id(self, dispatch_job_id: ID_TYPES | None) -> None: + """Bind the job to the identifier supplied by dispatch.""" + with self._lock: + self._dispatch_job_id = dispatch_job_id + + @classmethod + @abstractmethod + def job_worker_type(cls) -> WORKER_TYPE: + """Type of worker that can process this job.""" + + _time_received: float | None = None + + @property + def time_received(self) -> float | None: + """The time the job response was either received or constructed (in epoch time). + + **Note:** This generally will be the time the job was popped from the server. However, manually constructed + api responses or jobs that are not popped from a queue may imbue this property with a different meaning. + + You can manually set this value with the `time_received` parameter in the constructor. 
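+
+        Example:
+            A sketch, assuming the job was constructed with `time_received=time.time()` when it
+            was popped from the server:
+
+                elapsed = job.time_since_received
+                if elapsed is not None and elapsed > 60:
+                    logger.warning(f"Job {job.job_id} has been waiting for over a minute")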
+ """ + return self._time_received + + @property + def time_since_received(self) -> float | None: + """The time since the job was popped from the queue in seconds, or None if not yet received.""" + if self._time_received is None: + return None + return time.time() - self._time_received + + _time_submitted: float | None = None + + @property + def time_submitted(self) -> float | None: + """The time the job was submitted to the API in epoch time or None if not submitted.""" + return self._time_submitted + + time_spent_generating: float = 0.0 + """The time spent generating the job in seconds.""" + time_to_download_aux_models: float = 0.0 + """The time spent downloading user-specified auxiliary models specific to the job (i.e., LoRas) in seconds.""" + + # _job_state_api: GENERATION_STATE | None = None + + @property + def job_identifier_string(self) -> str: + """Returns a string that identifies the job.""" + return f"{self.generation.generation_id}:{self.generation.result_ids}" + + _fault_reason: WORKER_ERRORS | None = None + _faulted_at: float | None = None + _faulted: bool = False + + @property + def faulted_reason(self) -> WORKER_ERRORS | None: + """The reason the job was faulted or None if not faulted.""" + with self._lock: + return self._fault_reason + + @property + def faulted_at(self) -> float | None: + """The time the job was faulted in epoch time or None if not faulted.""" + with self._lock: + return self._faulted_at + + @property + def is_faulted(self) -> bool: + """Whether or not the job has been marked as faulted.""" + with self._lock: + return self._faulted + + # TODO + # FIXME + # @property + # def job_state_api(self) -> GENERATION_STATE | None: + # """The state of the job using the codes used by the API or None if there is no comparable state.""" + # return self._job_state_api + + _should_censor_nsfw: bool = False + """Whether or not the user has requested that NSFW content be censored.""" + + @property + def should_censor_nsfw(self) -> bool: + """Whether or not the user has requested that NSFW content be censored.""" + return self._should_censor_nsfw + + def set_job_faulted(self, faulted_reason: WORKER_ERRORS, failure_exception: Exception | None = None) -> None: + """Mark the entire job as faulted. + + Note: This will mark all generations in the job as faulted. + """ + with self._lock: + if self._faulted: + logger.warning( + f"Job {self.job_identifier_string} is already marked faulted with " + f"reason {self._fault_reason} at {self._faulted_at}", + ) + + self._faulted = True + self._fault_reason = faulted_reason + self._faulted_at = time.time() + + self.generation.on_abort( + failed_message=faulted_reason, + failure_exception=failure_exception, + ) + + @property + def is_job_finalized(self) -> bool: + """Return true if the generation in the job is finalized. + + Note: This means the generation has been submitted as either successful or failed, or has been + abandoned. Accordingly, there is nothing more to do with the job. 
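+
+        Example:
+            A sketch of a simple polling loop; the worker code that actually advances the
+            generation is assumed to be running elsewhere:
+
+                while not job.is_job_finalized:
+                    time.sleep(0.5)
+                if job.job_completed_successfully:
+                    logger.info(f"Job {job.job_identifier_string} submitted successfully")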
+ """ + with self._lock: + return self.generation.get_generation_progress() in finalized_generation_states + + @property + def job_completed_successfully(self) -> bool: + """Return true if the generation in the job completed successfully.""" + with self._lock: + return self.generation.get_generation_progress() == GENERATION_PROGRESS.SUBMIT_COMPLETE diff --git a/horde_sdk/worker/jobs.py b/horde_sdk/worker/jobs.py new file mode 100644 index 00000000..087c5f08 --- /dev/null +++ b/horde_sdk/worker/jobs.py @@ -0,0 +1,310 @@ +from __future__ import annotations + +from collections.abc import Sequence +from typing import override + +from horde_sdk.consts import ID_TYPES, WORKER_TYPE, HTTPMethod +from horde_sdk.generation_parameters import ( + BasicImageGenerationParametersTemplate, + ImageGenerationParameters, + SingleAlchemyParameters, + TextGenerationParameters, +) +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_FORMS +from horde_sdk.generation_parameters.alchemy.object_models import SingleAlchemyParametersTemplate +from horde_sdk.generation_parameters.image.object_models import ( + ImageGenerationComponentContainer, + ImageGenerationParametersTemplate, +) +from horde_sdk.generation_parameters.text.object_models import ( + BasicTextGenerationParametersTemplate, + TextGenerationParametersTemplate, +) +from horde_sdk.generation_parameters.utils import ResultIdAllocator +from horde_sdk.worker.generations import ( + AlchemyGenerationInitKwargs, + AlchemySingleGeneration, + ImageGenerationInitKwargs, + ImageSingleGeneration, + TextGenerationInitKwargs, + TextSingleGeneration, + _stringify_id, +) +from horde_sdk.worker.job_base import ( + HordeWorkerJob, + HordeWorkerJobConfig, +) + +DEFAULT_UPLOAD_METHOD = HTTPMethod.PUT + + +class ImageWorkerJob(HordeWorkerJob[ImageSingleGeneration, ImageGenerationParameters]): + """A job containing only image generations.""" + + def __init__( + self, + *, + generation: ImageSingleGeneration, + job_config: HordeWorkerJobConfig | None = None, + job_id: ID_TYPES | None = None, + dispatch_job_id: ID_TYPES | None = None, + dispatch_result_ids: Sequence[ID_TYPES] | None = None, + preserve_generation_id: bool = False, + ) -> None: + """Initialize the image worker job. + + Args: + generation (ImageSingleGeneration): The generation to use for the job. + job_config (HordeWorkerJobConfig | None): The configuration for the job. + job_config (HordeWorkerJobConfig, optional): The configuration for the job. If `None`, the default \ + configuration will be used. Defaults to None. + job_id (ID_TYPES | None): Optional identifier to associate with the job. + dispatch_job_id (ID_TYPES | None): Identifier supplied by dispatch for the job. + dispatch_result_ids (Sequence[ID_TYPES] | None): Result identifiers supplied by dispatch for the + generation. + preserve_generation_id (bool): Retain the existing generation identifier instead of overwriting it with the + job identifier. 
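+
+        Example:
+            A sketch of wrapping an existing generation in a job. `image_params` is assumed to
+            be a prepared `ImageGenerationParameters` instance, and `ImageSingleGeneration` is
+            assumed to be constructed from it the same way as the other single-generation
+            classes:
+
+                generation = ImageSingleGeneration(generation_parameters=image_params)
+                job = ImageWorkerJob(
+                    generation=generation,
+                    dispatch_job_id="remote-job-id",
+                )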
+ """ + super().__init__( + generation=generation, + generation_cls=ImageSingleGeneration, + job_config=job_config, + job_id=job_id, + dispatch_job_id=dispatch_job_id, + dispatch_result_ids=dispatch_result_ids, + preserve_generation_id=preserve_generation_id, + ) + + @override + @classmethod + def job_worker_type(cls) -> WORKER_TYPE: + return WORKER_TYPE.image + + @classmethod + def from_template( + cls, + template: ImageGenerationParametersTemplate, + *, + generation_id: ID_TYPES | None = None, + dispatch_result_ids: Sequence[ID_TYPES] | None = None, + job_id: ID_TYPES | None = None, + dispatch_job_id: ID_TYPES | None = None, + base_param_updates: BasicImageGenerationParametersTemplate | None = None, + additional_param_updates: ImageGenerationComponentContainer | None = None, + result_ids: Sequence[ID_TYPES] | None = None, + allocator: ResultIdAllocator | None = None, + seed: str = "image", + generation_kwargs: ImageGenerationInitKwargs | None = None, + job_config: HordeWorkerJobConfig | None = None, + preserve_generation_id: bool = False, + ) -> ImageWorkerJob: + """Instantiate an image job from a template.""" + generation_parameters = template.to_parameters( + base_param_updates=base_param_updates, + additional_param_updates=additional_param_updates, + result_ids=result_ids, + allocator=allocator, + seed=seed, + ) + init_kwargs: ImageGenerationInitKwargs = {} + if generation_kwargs: + init_kwargs.update(generation_kwargs) + if generation_id is not None: + init_kwargs.setdefault("generation_id", generation_id) + if dispatch_result_ids is not None: + init_kwargs.setdefault("dispatch_result_ids", list(dispatch_result_ids)) + init_kwargs.setdefault("result_ids", generation_parameters.result_ids) + generation = ImageSingleGeneration( + generation_parameters=generation_parameters, + **init_kwargs, + ) + return cls( + generation=generation, + job_config=job_config, + job_id=job_id, + dispatch_job_id=dispatch_job_id, + dispatch_result_ids=dispatch_result_ids, + preserve_generation_id=preserve_generation_id, + ) + + +class AlchemyWorkerJob(HordeWorkerJob[AlchemySingleGeneration, SingleAlchemyParameters]): + """A job containing only alchemy generations.""" + + def __init__( + self, + *, + generation: AlchemySingleGeneration, + job_config: HordeWorkerJobConfig | None = None, + job_id: ID_TYPES | None = None, + dispatch_job_id: ID_TYPES | None = None, + dispatch_result_ids: Sequence[ID_TYPES] | None = None, + preserve_generation_id: bool = False, + ) -> None: + """Initialize the alchemy worker job. + + Args: + generation (AlchemySingleGeneration): The response from the API. + job_config (HordeWorkerJobConfig | None, optional): The configuration for the job. If `None`, the default \ + configuration will be used. Defaults to None. + job_id (ID_TYPES | None): Optional identifier to associate with the job. + dispatch_job_id (ID_TYPES | None): Identifier supplied by dispatch for the job. + dispatch_result_ids (Sequence[ID_TYPES] | None): Result identifiers supplied by dispatch for the + generation. + preserve_generation_id (bool): Retain the existing generation identifier instead of overwriting it with the + job identifier. 
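+
+        Example:
+            A sketch of building an alchemy job directly from a template. `upscale_template` is
+            assumed to be a prepared `SingleAlchemyParametersTemplate` and `image_bytes` the raw
+            bytes of the source image:
+
+                job = AlchemyWorkerJob.from_template(
+                    upscale_template,
+                    source_image=image_bytes,
+                    dispatch_job_id="remote-job-id",
+                )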
+ """ + super().__init__( + generation=generation, + generation_cls=AlchemySingleGeneration, + job_config=job_config, + job_id=job_id, + dispatch_job_id=dispatch_job_id, + dispatch_result_ids=dispatch_result_ids, + preserve_generation_id=preserve_generation_id, + ) + + @override + @classmethod + def job_worker_type(cls) -> WORKER_TYPE: + return WORKER_TYPE.alchemist + + @classmethod + def from_template( + cls, + template: SingleAlchemyParametersTemplate, + *, + source_image: bytes | str | None = None, + default_form: KNOWN_ALCHEMY_FORMS | str | None = KNOWN_ALCHEMY_FORMS.post_process, + generation_id: ID_TYPES | None = None, + dispatch_result_ids: Sequence[ID_TYPES] | None = None, + result_id: ID_TYPES | None = None, + job_id: ID_TYPES | None = None, + dispatch_job_id: ID_TYPES | None = None, + allocator: ResultIdAllocator | None = None, + seed: str = "alchemy", + generation_kwargs: AlchemyGenerationInitKwargs | None = None, + job_config: HordeWorkerJobConfig | None = None, + preserve_generation_id: bool = False, + ) -> AlchemyWorkerJob: + """Instantiate an alchemy job from a template.""" + generation_parameters = template.to_parameters( + result_id=result_id, + source_image=source_image, + default_form=default_form, + allocator=allocator, + seed=seed, + ) + init_kwargs: AlchemyGenerationInitKwargs = {} + if generation_kwargs: + init_kwargs.update(generation_kwargs) + if generation_id is not None: + init_kwargs.setdefault("generation_id", _stringify_id(generation_id)) + if dispatch_result_ids is not None: + init_kwargs.setdefault( + "dispatch_result_ids", + [ + stringified + for identifier in dispatch_result_ids + if (stringified := _stringify_id(identifier)) is not None + ], + ) + init_kwargs.setdefault("result_ids", [generation_parameters.result_id]) + generation = AlchemySingleGeneration( + generation_parameters=generation_parameters, + **init_kwargs, + ) + return cls( + generation=generation, + job_config=job_config, + job_id=job_id, + dispatch_job_id=dispatch_job_id, + dispatch_result_ids=dispatch_result_ids, + preserve_generation_id=preserve_generation_id, + ) + + +class TextWorkerJob(HordeWorkerJob[TextSingleGeneration, TextGenerationParameters]): + """A job containing only text generations.""" + + def __init__( + self, + generation: TextSingleGeneration, + job_config: HordeWorkerJobConfig | None = None, + job_id: ID_TYPES | None = None, + dispatch_job_id: ID_TYPES | None = None, + dispatch_result_ids: Sequence[ID_TYPES] | None = None, + preserve_generation_id: bool = False, + ) -> None: + """Initialize the text worker job. + + Args: + generation (TextSingleGeneration): The response from the API. + job_config (HordeWorkerJobConfig | None, optional): The configuration for the job. If `None`, the default \ + configuration will be used. Defaults to None. + job_id (ID_TYPES | None): Optional identifier to associate with the job. + dispatch_job_id (ID_TYPES | None): Identifier supplied by dispatch for the job. + dispatch_result_ids (Sequence[ID_TYPES] | None): Result identifiers supplied by dispatch for the + generation. + preserve_generation_id (bool): Retain the existing generation identifier instead of overwriting it with the + job identifier. 
+ """ + super().__init__( + generation=generation, + generation_cls=TextSingleGeneration, + job_config=job_config, + job_id=job_id, + dispatch_job_id=dispatch_job_id, + dispatch_result_ids=dispatch_result_ids, + preserve_generation_id=preserve_generation_id, + ) + + @override + @classmethod + def job_worker_type(cls) -> WORKER_TYPE: + return WORKER_TYPE.text + + @classmethod + def from_template( + cls, + template: TextGenerationParametersTemplate, + *, + generation_id: ID_TYPES | None = None, + dispatch_result_ids: Sequence[ID_TYPES] | None = None, + job_id: ID_TYPES | None = None, + dispatch_job_id: ID_TYPES | None = None, + base_param_updates: BasicTextGenerationParametersTemplate | None = None, + result_ids: Sequence[ID_TYPES] | None = None, + allocator: ResultIdAllocator | None = None, + seed: str = "text", + generation_kwargs: TextGenerationInitKwargs | None = None, + job_config: HordeWorkerJobConfig | None = None, + preserve_generation_id: bool = False, + ) -> TextWorkerJob: + """Instantiate a text job from a template.""" + generation_parameters = template.to_parameters( + base_param_updates=base_param_updates, + result_ids=result_ids, + allocator=allocator, + seed=seed, + ) + init_kwargs: TextGenerationInitKwargs = {} + if generation_kwargs: + init_kwargs.update(generation_kwargs) + if generation_id is not None: + init_kwargs.setdefault("generation_id", generation_id) + if dispatch_result_ids is not None: + init_kwargs.setdefault("dispatch_result_ids", list(dispatch_result_ids)) + init_kwargs.setdefault("result_ids", generation_parameters.result_ids) + generation = TextSingleGeneration( + generation_parameters=generation_parameters, + **init_kwargs, + ) + return cls( + generation=generation, + job_config=job_config, + job_id=job_id, + dispatch_job_id=dispatch_job_id, + dispatch_result_ids=dispatch_result_ids, + preserve_generation_id=preserve_generation_id, + ) diff --git a/horde_sdk/ai_horde_worker/locale_info/README.md b/horde_sdk/worker/locale_info/README.md similarity index 100% rename from horde_sdk/ai_horde_worker/locale_info/README.md rename to horde_sdk/worker/locale_info/README.md diff --git a/horde_sdk/ai_horde_worker/locale_info/__init__.py b/horde_sdk/worker/locale_info/__init__.py similarity index 100% rename from horde_sdk/ai_horde_worker/locale_info/__init__.py rename to horde_sdk/worker/locale_info/__init__.py diff --git a/horde_sdk/ai_horde_worker/locale_info/bridge_data_fields.py b/horde_sdk/worker/locale_info/bridge_data_fields.py similarity index 100% rename from horde_sdk/ai_horde_worker/locale_info/bridge_data_fields.py rename to horde_sdk/worker/locale_info/bridge_data_fields.py diff --git a/horde_sdk/ai_horde_worker/model_meta.py b/horde_sdk/worker/model_meta.py similarity index 91% rename from horde_sdk/ai_horde_worker/model_meta.py rename to horde_sdk/worker/model_meta.py index 6ad3d877..65076261 100644 --- a/horde_sdk/ai_horde_worker/model_meta.py +++ b/horde_sdk/worker/model_meta.py @@ -1,7 +1,7 @@ import os import re -from horde_model_reference.meta_consts import MODEL_REFERENCE_CATEGORY, STABLE_DIFFUSION_BASELINE_CATEGORY +from horde_model_reference.meta_consts import KNOWN_IMAGE_GENERATION_BASELINE, MODEL_REFERENCE_CATEGORY from horde_model_reference.model_reference_manager import ModelReferenceManager from horde_model_reference.model_reference_records import ImageGenerationModelRecord from loguru import logger @@ -9,8 +9,8 @@ from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIManualClient from horde_sdk.ai_horde_api.apimodels import 
ImageStatsModelsRequest, ImageStatsModelsResponse, StatsModelsTimeframe from horde_sdk.ai_horde_api.consts import MODEL_STATE -from horde_sdk.ai_horde_worker.bridge_data import MetaInstruction from horde_sdk.generic_api.apimodels import RequestErrorResponse +from horde_sdk.worker.dispatch.ai_horde.bridge_data import MetaInstruction class ImageModelLoadResolver: @@ -21,21 +21,22 @@ class ImageModelLoadResolver: _model_reference_manager: ModelReferenceManager - def __init__(self, model_reference_manager: ModelReferenceManager) -> None: # noqa: D107 - if not isinstance(model_reference_manager, ModelReferenceManager): - raise TypeError("model_reference_manager must be of type ModelReferenceManager") - self._model_reference_manager = model_reference_manager + def __init__(self) -> None: # noqa: D107 + self._model_reference_manager = ModelReferenceManager() def resolve_meta_instructions( self, possible_meta_instructions: list[str], client: AIHordeAPIManualClient, + *, + load_large_models: bool = True, ) -> set[str]: """Return a set of model names based on the given meta instructions. Args: possible_meta_instructions: A list of strings representing meta instructions. client: An AIHordeAPIManualClient object to use for making requests. + load_large_models: A boolean representing whether to load large models. Returns: A set of strings representing the names of models to load. @@ -132,8 +133,10 @@ def resolve_meta_instructions( ): return_list.extend(self.resolve_all_nsfw_model_names()) - # If no valid meta instruction were found, return None - return self.remove_large_models(set(return_list)) + if not load_large_models: + return self.remove_large_models(set(return_list)) + + return set(return_list) @staticmethod def meta_instruction_regex_match(instruction: str, target_string: str) -> re.Match[str] | None: @@ -149,15 +152,23 @@ def meta_instruction_regex_match(instruction: str, target_string: str) -> re.Mat """ return re.match(instruction, target_string, re.IGNORECASE) - def remove_large_models(self, models: set[str]) -> set[str]: + def remove_large_models( + self, + models: set[str], + load_large_models: bool = True, + ) -> set[str]: """Remove large models from the input set of models.""" AI_HORDE_MODEL_META_LARGE_MODELS = os.getenv("AI_HORDE_MODEL_META_LARGE_MODELS") - if not AI_HORDE_MODEL_META_LARGE_MODELS: - cascade_models = self.resolve_all_models_of_baseline(STABLE_DIFFUSION_BASELINE_CATEGORY.stable_cascade) - flux_models = self.resolve_all_models_of_baseline(STABLE_DIFFUSION_BASELINE_CATEGORY.flux_1) - - logger.debug(f"Removing cascade models: {cascade_models}") - logger.debug(f"Removing flux models: {flux_models}") + if not AI_HORDE_MODEL_META_LARGE_MODELS or not load_large_models: + cascade_models = self.resolve_all_models_of_baseline(KNOWN_IMAGE_GENERATION_BASELINE.stable_cascade) + flux_models = self.resolve_all_models_of_baseline(KNOWN_IMAGE_GENERATION_BASELINE.flux_1) + + if not AI_HORDE_MODEL_META_LARGE_MODELS: + logger.debug( + "Loading of large models is disabled with `AI_HORDE_MODEL_META_LARGE_MODELS`. " + f"Removing {len(cascade_models) + len(flux_models)} models. 
" + f"({cascade_models}, {flux_models})", + ) models = models - cascade_models - flux_models return models diff --git a/horde_sdk/ai_horde_worker/worker_client.py b/horde_sdk/worker/worker_client.py similarity index 100% rename from horde_sdk/ai_horde_worker/worker_client.py rename to horde_sdk/worker/worker_client.py diff --git a/mkdocs.yml b/mkdocs.yml index d58de5c3..1e458927 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -2,6 +2,15 @@ site_name: Horde SDK Documentation plugins: - search +- mermaid2: + version: '10.1.0' + arguments: + theme: 'dark' + themeVariables: + darkMode: true + fontSize: 24 + primaryColor: '#ff0000' + - awesome-pages - autorefs - mkdocstrings: @@ -9,6 +18,7 @@ plugins: python: options: members_order: source + inherited_members: true docstring_section_style: list show_if_no_docstring: true separate_signature: true @@ -21,10 +31,18 @@ markdown_extensions: pygments_lang_class: true - pymdownx.inlinehilite - pymdownx.snippets - - pymdownx.superfences + - pymdownx.superfences: + # make exceptions to highlighting of code: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:mermaid2.fence_mermaid_custom theme: name: material + language: en + palette: + scheme: slate extra_css: - stylesheets/extra.css diff --git a/pyproject.toml b/pyproject.toml index 74a63933..ba30dfb5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,8 +10,8 @@ authors = [ {name = "db0", email = "mail@dbzer0.com"}, ] readme = "README.md" -requires-python = ">=3.10" -dynamic=["version", "dependencies"] +requires-python = ">=3.12" +dynamic=["version"] license = {file = "LICENSE"} classifiers = [ "Programming Language :: Python :: 3", @@ -19,6 +19,21 @@ classifiers = [ "License :: OSI Approved :: GNU Affero General Public License v3", "Development Status :: 4 - Beta", ] +dependencies = [ + "aiodns>=3.2.0", + "aiofiles>=24.1.0", + "aiohttp>=3.11.13", + "certifi>=2025.1.31", + "horde-model-reference>=0.10.0", + "logfire>=3.7.1", + "loguru>=0.7.3", + "networkx>=3.4.2", + "pillow>=11.1.0", + "pydantic>=2.10.6", + "python-dotenv>=1.0.1", + "requests>=2.32.3", + "strenum>=0.4.15", +] [tool.setuptools_scm] write_to = "horde_sdk/_version.py" @@ -38,7 +53,7 @@ tests = ["*.json"] [tool.ruff] line-length = 119 -exclude = ["codegen"] +exclude = ["codegen", ".venv*"] [tool.ruff.lint] select = [ @@ -49,6 +64,7 @@ select = [ "F", "UP", "YTT", + "TID", "B", "C4", "PIE", @@ -62,8 +78,7 @@ select = [ "ASYNC", ] ignore = [ - "ANN101", # Missing type annotation for self in method - "ANN102", # Missing type annotation for cls in classmethod + "COM812", # Missing trailing comma in a trailing collection (handled by ruff format directly) "D105", # Missing docstring in magic method "D100", # Missing docstring in public module # Ignore D rules for non-google docstring standard @@ -87,6 +102,7 @@ ignore = [ "docs/*" = ["D"] # Ignore D rules for docs "codegen/*" = ["D"] # Ignore D rules for codegen + [tool.black] line-length = 119 include = '\.pyi?$' @@ -95,11 +111,13 @@ exclude = ''' \.git | \.hg | \.mypy_cache + | \.ruff_cache | \.tox - | \.venv + | \.venv.* | _build | buck-out | build + | codegen | dist )/ ''' @@ -116,12 +134,16 @@ exclude = [ '\.git', '\.hg', '\.mypy_cache', + '\.ruff_cache', '\.tox', '\.venv', + '\.venv.*', '_build', 'buck-out', 'build', + 'codegen', 'dist', + 'docs', ] [tool.coverage.run] @@ -134,3 +156,42 @@ markers = [ "object_verify: marks tests that verify the API object structure and layout", "api_side_ci: indicates that the test is intended to run during CI for the API", ] + 
+[dependency-groups] +dev = [ + "babel>=2.17.0", + "black>=24.10.0", + "build>=0.10.0", + "coverage>=7.2.7", + "datamodel-code-generator>=0.28.4", + "gevent>=24.11.1", + "mypy>=1.13.0", + "pre-commit~=4.0.1", + "pytest>=8.3.3", + "pytest-asyncio>=0.25.3", + "pytest-cov>=6.0.0", + "pytest-xdist>=3.8.0", + "ruff>=0.7.2", + "tach>=0.29.0", + "tox~=4.23.2", + "types-aiofiles>=24.1.0.20241221", + "types-networkx>=3.5.0.20251104", + "types-pillow>=10.2.0.20240822", + "types-pytz>=2025.1.0.20250204", + "types-pyyaml>=6.0.12.20250402", + "types-requests>=2.32.0.20250306", + "types-setuptools>=75.8.2.20250305", + "types-urllib3>=1.26.25.14", +] +docs = [ + "mkdocs-awesome-pages-plugin>=2.10.1", + "mkdocs-material>=9.6.7", + "mkdocs-material-extensions>=1.3.1", + "mkdocs-mermaid2-plugin>=1.2.1", + "mkdocstrings>=0.29.0", + "mkdocstrings-python>=1.16.5", +] +full-telemetry = [ + "opentelemetry-instrumentation-aiohttp-client>=0.51b0", + "opentelemetry-instrumentation-requests>=0.51b0", +] diff --git a/requirements.dev.txt b/requirements.dev.txt index 414c2827..d09a5652 100644 --- a/requirements.dev.txt +++ b/requirements.dev.txt @@ -1,21 +1,65 @@ -pytest==8.3.3 -mypy==1.13.0 +# This file was autogenerated by uv via the following command: +# uv export --no-emit-project --only-group dev --no-hashes --format requirements-txt +babel==2.17.0 black==24.10.0 +build==1.2.2.post1 +cachetools==5.5.2 +cffi==1.17.1 ; platform_python_implementation == 'CPython' and sys_platform == 'win32' +cfgv==3.4.0 +chardet==5.2.0 +click==8.1.8 +colorama==0.4.6 +coverage==7.6.12 +distlib==0.3.9 +exceptiongroup==1.2.2 ; python_full_version < '3.11' +filelock==3.17.0 +gevent==24.11.1 +gitdb==4.0.12 +gitpython==3.1.44 +greenlet==3.1.1 ; platform_python_implementation == 'CPython' +identify==2.6.8 +importlib-metadata==8.6.1 ; python_full_version < '3.10.2' +iniconfig==2.0.0 +markdown-it-py==3.0.0 +mdurl==0.1.2 +mypy==1.13.0 +mypy-extensions==1.0.0 +networkx==3.4.2 +nodeenv==1.9.1 +packaging==24.2 +pathspec==0.12.1 +platformdirs==4.3.6 +pluggy==1.5.0 +pre-commit==4.0.1 +prompt-toolkit==3.0.50 +pycparser==2.22 ; platform_python_implementation == 'CPython' and sys_platform == 'win32' +pydot==3.0.4 +pygments==2.19.1 +pyparsing==3.2.1 +pyproject-api==1.9.0 +pyproject-hooks==1.2.0 +pytest==8.3.3 +pytest-asyncio==0.25.3 +pytest-cov==6.0.0 +pyyaml==6.0.2 +rich==13.9.4 ruff==0.7.2 -tox~=4.23.2 -pre-commit~=4.0.1 -build>=0.10.0 -coverage>=7.2.7 - -pytest-cov -pytest-asyncio -gevent - -babel - -types-Pillow -types-pytz -types-requests -types-setuptools -types-urllib3 -types-aiofiles +setuptools==75.8.2 +smmap==5.0.2 +tach==0.27.2 +tomli==2.2.1 +tomli-w==1.2.0 +tox==4.23.2 +types-aiofiles==24.1.0.20241221 +types-pillow==10.2.0.20240822 +types-pytz==2025.1.0.20250204 +types-requests==2.32.0.20250301 +types-setuptools==75.8.2.20250301 +types-urllib3==1.26.25.14 +typing-extensions==4.12.2 +urllib3==2.3.0 +virtualenv==20.29.2 +wcwidth==0.2.13 +zipp==3.21.0 ; python_full_version < '3.10.2' +zope-event==5.0 +zope-interface==7.2 diff --git a/requirements.docs.txt b/requirements.docs.txt index c8775fda..12903750 100644 --- a/requirements.docs.txt +++ b/requirements.docs.txt @@ -1,5 +1,46 @@ -mkdocstrings -mkdocstrings-python -mkdocs-awesome-pages-plugin -mkdocs-material -mkdocs-material-extensions +# This file was autogenerated by uv via the following command: +# uv export --no-emit-project --only-group docs --no-hashes --format requirements-txt +babel==2.17.0 +backrefs==5.8 +beautifulsoup4==4.13.3 +bracex==2.5.post1 +certifi==2025.1.31 
+charset-normalizer==3.4.1 +click==8.1.8 +colorama==0.4.6 +editorconfig==0.17.0 +ghp-import==2.1.0 +griffe==1.6.0 +idna==3.10 +jinja2==3.1.5 +jsbeautifier==1.15.4 +markdown==3.7 +markupsafe==3.0.2 +mergedeep==1.3.4 +mkdocs==1.6.1 +mkdocs-autorefs==1.4.0 +mkdocs-awesome-pages-plugin==2.10.1 +mkdocs-get-deps==0.2.0 +mkdocs-material==9.6.6 +mkdocs-material-extensions==1.3.1 +mkdocs-mermaid2-plugin==1.2.1 +mkdocstrings==0.28.2 +mkdocstrings-python==1.16.2 +natsort==8.4.0 +packaging==24.2 +paginate==0.5.7 +pathspec==0.12.1 +platformdirs==4.3.6 +pygments==2.19.1 +pymdown-extensions==10.14.3 +python-dateutil==2.9.0.post0 +pyyaml==6.0.2 +pyyaml-env-tag==0.1 +requests==2.32.3 +setuptools==75.8.2 +six==1.17.0 +soupsieve==2.6 +typing-extensions==4.12.2 +urllib3==2.3.0 +watchdog==6.0.0 +wcmatch==10.0 diff --git a/requirements.txt b/requirements.txt index 77d1e418..e9a1c591 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,12 +1,36 @@ -horde_model_reference>=2.0.0 +# This file was autogenerated by uv via the following command: +# uv export --no-emit-project --no-dev --no-hashes --format requirements-txt +aiodns==3.2.0 +aiofiles==24.1.0 +aiohappyeyeballs==2.4.6 +aiohttp==3.11.13 +aiosignal==1.3.2 +annotated-types==0.7.0 +async-timeout==5.0.1 ; python_full_version < '3.11' +attrs==25.1.0 +certifi==2025.1.31 +cffi==1.17.1 +charset-normalizer==3.4.1 +colorama==0.4.6 ; sys_platform == 'win32' +frozenlist==1.5.0 +horde-model-reference==0.10.0 +idna==3.10 +loguru==0.7.3 +multidict==6.1.0 +pillow==11.1.0 +propcache==0.3.0 +pycares==4.5.0 +pycparser==2.22 +pydantic==2.10.6 +pydantic-core==2.27.2 +python-dotenv==1.0.1 +requests==2.32.3 +strenum==0.4.15 +typing-extensions==4.12.2 +urllib3==2.3.0 +win32-setctime==1.2.0 ; sys_platform == 'win32' +yarl==1.18.3 -pydantic>=2.9.2 -requests -StrEnum -loguru -certifi -aiohttp -aiofiles -aiodns -pillow -python-dotenv +logfire>=3.7.1 +opentelemetry-instrumentation-aiohttp-client>=0.51b0 +opentelemetry-instrumentation-requests>=0.51b0 diff --git a/tach.toml b/tach.toml new file mode 100644 index 00000000..5c3a0057 --- /dev/null +++ b/tach.toml @@ -0,0 +1,31 @@ +interfaces = [] +exclude = [ + "**/*__pycache__", + "**/*egg-info", + "**/docs", + "**/tests", + "**/venv", +] +source_roots = [ + ".", +] + +[[modules ]] +path = "horde_sdk.ai_horde_api" +depends_on = ["horde_sdk.generation_parameters", "horde_sdk.generic_api"] + +[[modules ]] +path = "horde_sdk.worker" +depends_on = ["horde_sdk.ai_horde_api", "horde_sdk.generic_api", "horde_sdk.generation_parameters", "horde_sdk.deploy_config"] + +[[modules ]] +path = "horde_sdk.generation_parameters" +depends_on = [] + +[[modules ]] +path = "horde_sdk.generic_api" +depends_on = [] + +[[modules ]] +path = "horde_sdk.deploy_config" +depends_on = ["horde_sdk.generation_parameters"] diff --git a/tests/ai_horde_api/test_ai_horde_alchemy_api_calls.py b/tests/ai_horde_api/test_ai_horde_alchemy_api_calls.py index ec8b8fa9..49042937 100644 --- a/tests/ai_horde_api/test_ai_horde_alchemy_api_calls.py +++ b/tests/ai_horde_api/test_ai_horde_alchemy_api_calls.py @@ -1,12 +1,12 @@ import pytest from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIClientSession -from horde_sdk.ai_horde_api.apimodels.alchemy._async import ( +from horde_sdk.ai_horde_api.apimodels.alchemy.async_ import ( AlchemyAsyncRequest, AlchemyAsyncRequestFormItem, AlchemyAsyncResponse, ) -from horde_sdk.ai_horde_api.consts import KNOWN_ALCHEMY_TYPES +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES class 
HordeTestException(Exception): diff --git a/tests/ai_horde_api/test_ai_horde_api_calls.py b/tests/ai_horde_api/test_ai_horde_api_calls.py index 5eaa54bc..2fefb103 100644 --- a/tests/ai_horde_api/test_ai_horde_api_calls.py +++ b/tests/ai_horde_api/test_ai_horde_api_calls.py @@ -18,7 +18,8 @@ ImageGenerateAsyncResponse, ImageGenerateStatusResponse, ) -from horde_sdk.ai_horde_api.consts import WORKER_TYPE +from horde_sdk.ai_horde_api.consts import AI_HORDE_WORKER_TYPES +from horde_sdk.consts import WORKER_TYPE from horde_sdk.generic_api.apimodels import RequestErrorResponse from horde_sdk.generic_api.utils.swagger import SwaggerDoc @@ -111,7 +112,7 @@ def test_workers_all(self) -> None: pytest.fail(f"API Response was an error: {api_response_image.message}") assert isinstance(api_response_image, AllWorkersDetailsResponse) - assert all(worker.type_ == WORKER_TYPE.image for worker in api_response_image.root) + assert all(worker.type_ == AI_HORDE_WORKER_TYPES.image for worker in api_response_image.root) api_request_text = AllWorkersDetailsRequest(type=WORKER_TYPE.text) api_response_text = client.submit_request( @@ -123,7 +124,7 @@ def test_workers_all(self) -> None: pytest.fail(f"API Response was an error: {api_response_text.message}") assert isinstance(api_response_text, AllWorkersDetailsResponse) - assert all(worker.type_ == WORKER_TYPE.text for worker in api_response_text.root) + assert all(worker.type_ == AI_HORDE_WORKER_TYPES.text for worker in api_response_text.root) api_request_interrogation = AllWorkersDetailsRequest(type=WORKER_TYPE.interrogation) api_response_interrogation = client.submit_request( diff --git a/tests/ai_horde_api/test_ai_horde_api_models.py b/tests/ai_horde_api/test_ai_horde_api_models.py index 5034bca4..1a820416 100644 --- a/tests/ai_horde_api/test_ai_horde_api_models.py +++ b/tests/ai_horde_api/test_ai_horde_api_models.py @@ -10,42 +10,36 @@ import pytest from horde_sdk.ai_horde_api.apimodels import ( - KNOWN_ALCHEMY_TYPES, + AlchemyJobPopResponse, AlchemyPopFormPayload, - AlchemyPopResponse, - ImageGenerateAsyncResponse, -) -from horde_sdk.ai_horde_api.apimodels._find_user import ( + AllWorkersDetailsResponse, + ContributionsDetails, FindUserRequest, -) -from horde_sdk.ai_horde_api.apimodels._users import ContributionsDetails, UsageDetails, UserDetailsResponse -from horde_sdk.ai_horde_api.apimodels.base import GenMetadataEntry -from horde_sdk.ai_horde_api.apimodels.generate._async import ( + GenMetadataEntry, ImageGenerateAsyncRequest, - ImageGenerationInputPayload, -) -from horde_sdk.ai_horde_api.apimodels.generate._pop import ( + ImageGenerateAsyncResponse, ImageGenerateJobPopPayload, ImageGenerateJobPopResponse, ImageGenerateJobPopSkippedStatus, -) -from horde_sdk.ai_horde_api.apimodels.workers._workers import ( - AllWorkersDetailsResponse, + ImageGenerationInputPayload, TeamDetailsLite, + UsageDetails, + UserDetailsResponse, WorkerDetailItem, WorkerKudosDetails, ) from horde_sdk.ai_horde_api.consts import ( - KNOWN_CONTROLNETS, - KNOWN_FACEFIXERS, - KNOWN_SAMPLERS, - KNOWN_SOURCE_PROCESSING, - KNOWN_UPSCALERS, + AI_HORDE_WORKER_TYPES, METADATA_TYPE, METADATA_VALUE, - WORKER_TYPE, ) -from horde_sdk.ai_horde_api.fields import JobID +from horde_sdk.ai_horde_api.fields import GenerationID +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES, KNOWN_FACEFIXERS, KNOWN_UPSCALERS +from horde_sdk.generation_parameters.image.consts import ( + KNOWN_IMAGE_CONTROLNETS, + KNOWN_IMAGE_SAMPLERS, + KNOWN_IMAGE_SOURCE_PROCESSING, +) def 
test_api_endpoint() -> None: @@ -61,7 +55,7 @@ def test_ImageGenerateAsyncRequest(ai_horde_api_key: str) -> None: prompt="test prompt", params=ImageGenerationInputPayload( # sampler_name="DDIM", - sampler_name=KNOWN_SAMPLERS.DDIM, + sampler_name=KNOWN_IMAGE_SAMPLERS.DDIM, cfg_scale=7.5, denoising_strength=1, seed="123456789", @@ -90,7 +84,7 @@ def test_ImageGenerateAsyncRequest(ai_horde_api_key: str) -> None: workers=[], censor_nsfw=False, source_image="test source image (usually base64)", - source_processing=KNOWN_SOURCE_PROCESSING.txt2img, + source_processing=KNOWN_IMAGE_SOURCE_PROCESSING.txt2img, source_mask="test source mask (usually base64)", r2=True, shared=False, @@ -130,7 +124,7 @@ def test_ImageGenerateAsyncRequest(ai_horde_api_key: str) -> None: assert test_async_request.workers == [] assert test_async_request.censor_nsfw is False assert test_async_request.source_image == "test source image (usually base64)" - assert test_async_request.source_processing == KNOWN_SOURCE_PROCESSING.txt2img + assert test_async_request.source_processing == KNOWN_IMAGE_SOURCE_PROCESSING.txt2img assert test_async_request.source_mask == "test source mask (usually base64)" assert test_async_request.r2 is True assert test_async_request.shared is False @@ -173,7 +167,7 @@ def test_ImageGenerateAsyncRequest_unknown_sampler(ai_horde_api_key: str) -> Non workers=[], censor_nsfw=False, source_image="test source image (usually base64)", - source_processing=KNOWN_SOURCE_PROCESSING.txt2img, + source_processing=KNOWN_IMAGE_SOURCE_PROCESSING.txt2img, source_mask="test source mask (usually base64)", r2=True, shared=False, @@ -206,7 +200,7 @@ def test_AllWorkersDetailsResponse() -> None: test_all_workers_details_response = AllWorkersDetailsResponse( root=[ WorkerDetailItem( - type=WORKER_TYPE.image, + type=AI_HORDE_WORKER_TYPES.image, name="test worker name", id="test worker id", online=True, @@ -244,7 +238,7 @@ def test_AllWorkersDetailsResponse() -> None: ), ], ) - assert test_all_workers_details_response[0].type_ == WORKER_TYPE.image + assert test_all_workers_details_response[0].type_ == AI_HORDE_WORKER_TYPES.image assert test_all_workers_details_response[0].name == "test worker name" assert test_all_workers_details_response[0].id_ == "test worker id" assert test_all_workers_details_response[0].online is True @@ -342,7 +336,7 @@ def test_ImageGenerateJobPopResponse() -> None: test_response = ImageGenerateJobPopResponse( id=None, - ids=[JobID(root=UUID("00000000-0000-0000-0000-000000000000"))], + ids=[GenerationID(root=UUID("00000000-0000-0000-0000-000000000000"))], payload=ImageGenerateJobPopPayload( post_processing=[KNOWN_UPSCALERS.RealESRGAN_x2plus], prompt="A cat in a hat", @@ -357,7 +351,7 @@ def test_ImageGenerateJobPopResponse() -> None: test_response = ImageGenerateJobPopResponse( id=None, - ids=[JobID(root=UUID("00000000-0000-0000-0000-000000000000"))], + ids=[GenerationID(root=UUID("00000000-0000-0000-0000-000000000000"))], payload=ImageGenerateJobPopPayload( prompt="A cat in a hat", ), @@ -370,7 +364,7 @@ def test_ImageGenerateJobPopResponse() -> None: test_response = ImageGenerateJobPopResponse( id=None, - ids=[JobID(root=UUID("00000000-0000-0000-0000-000000000000"))], + ids=[GenerationID(root=UUID("00000000-0000-0000-0000-000000000000"))], payload=ImageGenerateJobPopPayload( post_processing=[KNOWN_FACEFIXERS.CodeFormers], prompt="A cat in a hat", @@ -384,7 +378,7 @@ def test_ImageGenerateJobPopResponse() -> None: test_response = ImageGenerateJobPopResponse( id=None, - 
ids=[JobID(root=UUID("00000000-0000-0000-0000-000000000000"))], + ids=[GenerationID(root=UUID("00000000-0000-0000-0000-000000000000"))], payload=ImageGenerateJobPopPayload( post_processing=[KNOWN_FACEFIXERS.CodeFormers, KNOWN_UPSCALERS.RealESRGAN_x2plus], prompt="A cat in a hat", @@ -398,7 +392,7 @@ def test_ImageGenerateJobPopResponse() -> None: test_response = ImageGenerateJobPopResponse( id=None, - ids=[JobID(root=UUID("00000000-0000-0000-0000-000000000000"))], + ids=[GenerationID(root=UUID("00000000-0000-0000-0000-000000000000"))], payload=ImageGenerateJobPopPayload( post_processing=["unknown post processor"], control_type="unknown control type", @@ -410,10 +404,10 @@ def test_ImageGenerateJobPopResponse() -> None: ) test_response = ImageGenerateJobPopResponse( id=None, - ids=[JobID(root=UUID("00000000-0000-0000-0000-000000000000"))], + ids=[GenerationID(root=UUID("00000000-0000-0000-0000-000000000000"))], payload=ImageGenerateJobPopPayload( post_processing=["unknown post processor"], - control_type=KNOWN_CONTROLNETS.canny, + control_type=KNOWN_IMAGE_CONTROLNETS.canny, sampler_name="unknown sampler", prompt="A cat in a hat", ), @@ -421,7 +415,7 @@ def test_ImageGenerateJobPopResponse() -> None: ) test_response = ImageGenerateJobPopResponse( id=None, - ids=[JobID(root=UUID("00000000-0000-0000-0000-000000000000"))], + ids=[GenerationID(root=UUID("00000000-0000-0000-0000-000000000000"))], payload=ImageGenerateJobPopPayload( post_processing=["unknown post processor"], control_type="canny", @@ -433,7 +427,7 @@ def test_ImageGenerateJobPopResponse() -> None: test_response = ImageGenerateJobPopResponse( id=None, - ids=[JobID(root=UUID("00000000-0000-0000-0000-000000000000"))], + ids=[GenerationID(root=UUID("00000000-0000-0000-0000-000000000000"))], payload=ImageGenerateJobPopPayload( post_processing=["4x_AnimeSharp"], prompt="A cat in a hat", @@ -443,13 +437,13 @@ def test_ImageGenerateJobPopResponse() -> None: ) assert all( - post_processor in KNOWN_UPSCALERS._value2member_map_ + post_processor in KNOWN_UPSCALERS.__members__ or post_processor in KNOWN_UPSCALERS.__members__.values() for post_processor in test_response.payload.post_processing ) test_response = ImageGenerateJobPopResponse( id=None, - ids=[JobID(root=UUID("00000000-0000-0000-0000-000000000000"))], + ids=[GenerationID(root=UUID("00000000-0000-0000-0000-000000000000"))], payload=ImageGenerateJobPopPayload( post_processing=[KNOWN_UPSCALERS.four_4x_AnimeSharp], prompt="A cat in a hat", @@ -458,13 +452,16 @@ def test_ImageGenerateJobPopResponse() -> None: skipped=ImageGenerateJobPopSkippedStatus(), ) - assert all(post_processor in KNOWN_UPSCALERS for post_processor in test_response.payload.post_processing) + assert all( + post_processor in KNOWN_UPSCALERS.__members__ or post_processor in KNOWN_UPSCALERS.__members__.values() + for post_processor in test_response.payload.post_processing + ) test_response = ImageGenerateJobPopResponse( ids=[ - JobID(root=UUID("00000000-0000-0000-0000-000000000001")), - JobID(root=UUID("00000000-0000-0000-0000-000000000002")), - JobID(root=UUID("00000000-0000-0000-0000-000000000000")), + GenerationID(root=UUID("00000000-0000-0000-0000-000000000001")), + GenerationID(root=UUID("00000000-0000-0000-0000-000000000002")), + GenerationID(root=UUID("00000000-0000-0000-0000-000000000000")), ], payload=ImageGenerateJobPopPayload( prompt="A cat in a hat", @@ -479,22 +476,25 @@ def test_ImageGenerateJobPopResponse() -> None: ) assert test_response.ids_present - assert test_response.ids == [ - 
JobID(root=UUID("00000000-0000-0000-0000-000000000000")), - JobID(root=UUID("00000000-0000-0000-0000-000000000001")), - JobID(root=UUID("00000000-0000-0000-0000-000000000002")), - ] - assert test_response.r2_uploads == [ + assert GenerationID(root=UUID("00000000-0000-0000-0000-000000000000")) in test_response.ids + assert GenerationID(root=UUID("00000000-0000-0000-0000-000000000001")) in test_response.ids + assert GenerationID(root=UUID("00000000-0000-0000-0000-000000000002")) in test_response.ids + + uploads = [ "https://abbaabbaabbaabbaabbaabbaabbaabba.r2.cloudflarestorage.com/horde-transient/00000000-0000-0000-0000-000000000000.webp?AWSAccessKeyId=deadbeefdeadbeefdeadbeefdeadbeef&Signature=345567dfakes2ignature%3D&Expires=1727390285", "https://abbaabbaabbaabbaabbaabbaabbaabba.r2.cloudflarestorage.com/horde-transient/00000000-0000-0000-0000-000000000001.webp?AWSAccessKeyId=deadbeefdeadbeefdeadbeefdeadbeef&Signature=zxcbvfakesignature%3D&Expires=1727390285", "https://abbaabbaabbaabbaabbaabbaabbaabba.r2.cloudflarestorage.com/horde-transient/00000000-0000-0000-0000-000000000002.webp?AWSAccessKeyId=deadbeefdeadbeefdeadbeefdeadbeef&Signature=asdfg32fakesignature%3D&Expires=1727390285", ] + assert test_response.r2_uploads is not None + for upload in test_response.r2_uploads: + assert upload in uploads + def test_ImageGenerateJobPopResponse_hashability() -> None: test_response_ids = ImageGenerateJobPopResponse( id=None, - ids=[JobID(root=UUID("00000000-0000-0000-0000-000000000000"))], + ids=[GenerationID(root=UUID("00000000-0000-0000-0000-000000000000"))], payload=ImageGenerateJobPopPayload( post_processing=[KNOWN_UPSCALERS.RealESRGAN_x2plus], prompt="A cat in a hat", @@ -506,7 +506,7 @@ def test_ImageGenerateJobPopResponse_hashability() -> None: test_response_ids_copy = ImageGenerateJobPopResponse( id=None, - ids=[JobID(root=UUID("00000000-0000-0000-0000-000000000000"))], + ids=[GenerationID(root=UUID("00000000-0000-0000-0000-000000000000"))], payload=ImageGenerateJobPopPayload( post_processing=[KNOWN_UPSCALERS.RealESRGAN_x2plus], prompt="A cat in a hat", @@ -518,7 +518,7 @@ def test_ImageGenerateJobPopResponse_hashability() -> None: test_response2_ids = ImageGenerateJobPopResponse( id=None, - ids=[JobID(root=UUID("00000000-0000-0000-0000-000000000001"))], + ids=[GenerationID(root=UUID("00000000-0000-0000-0000-000000000001"))], payload=ImageGenerateJobPopPayload( post_processing=[KNOWN_UPSCALERS.RealESRGAN_x2plus], prompt="A cat in a hat", @@ -541,7 +541,7 @@ def test_ImageGenerateJobPopResponse_hashability() -> None: assert test_response2_ids in combined_container test_response_no_ids = ImageGenerateJobPopResponse( - id=JobID(root=UUID("00000000-0000-0000-0000-000000000000")), + id=GenerationID(root=UUID("00000000-0000-0000-0000-000000000000")), ids=[], payload=ImageGenerateJobPopPayload( post_processing=[KNOWN_UPSCALERS.RealESRGAN_x2plus], @@ -552,7 +552,7 @@ def test_ImageGenerateJobPopResponse_hashability() -> None: ) test_response_no_ids2 = ImageGenerateJobPopResponse( - id=JobID(root=UUID("00000000-0000-0000-0000-000000000001")), + id=GenerationID(root=UUID("00000000-0000-0000-0000-000000000001")), ids=[], payload=ImageGenerateJobPopPayload( post_processing=[KNOWN_UPSCALERS.RealESRGAN_x2plus], @@ -577,8 +577,8 @@ def test_ImageGenerateJobPopResponse_hashability() -> None: test_response_multiple_ids = ImageGenerateJobPopResponse( id=None, ids=[ - JobID(root=UUID("00000000-0000-0000-0000-000000000000")), - JobID(root=UUID("00000000-0000-0000-0000-000000000001")), + 
GenerationID(root=UUID("00000000-0000-0000-0000-000000000000")), + GenerationID(root=UUID("00000000-0000-0000-0000-000000000001")), ], payload=ImageGenerateJobPopPayload( post_processing=[KNOWN_UPSCALERS.RealESRGAN_x2plus], @@ -592,8 +592,8 @@ def test_ImageGenerateJobPopResponse_hashability() -> None: test_response_multiple_ids_copy = ImageGenerateJobPopResponse( id=None, ids=[ - JobID(root=UUID("00000000-0000-0000-0000-000000000001")), - JobID(root=UUID("00000000-0000-0000-0000-000000000000")), + GenerationID(root=UUID("00000000-0000-0000-0000-000000000001")), + GenerationID(root=UUID("00000000-0000-0000-0000-000000000000")), ], payload=ImageGenerateJobPopPayload( post_processing=[KNOWN_UPSCALERS.RealESRGAN_x2plus], @@ -607,8 +607,8 @@ def test_ImageGenerateJobPopResponse_hashability() -> None: test_response_multiple_ids_2 = ImageGenerateJobPopResponse( id=None, ids=[ - JobID(root=UUID("00000000-0000-0000-0000-000000000002")), - JobID(root=UUID("00000000-0000-0000-0000-000000000003")), + GenerationID(root=UUID("00000000-0000-0000-0000-000000000002")), + GenerationID(root=UUID("00000000-0000-0000-0000-000000000003")), ], payload=ImageGenerateJobPopPayload( post_processing=[KNOWN_UPSCALERS.RealESRGAN_x2plus], @@ -636,7 +636,7 @@ async def test_ImageGenerateJobPop_download_addtl_data() -> None: test_response = ImageGenerateJobPopResponse( id=None, - ids=[JobID(root=UUID("00000000-0000-0000-0000-000000000000"))], + ids=[GenerationID(root=UUID("00000000-0000-0000-0000-000000000000"))], payload=ImageGenerateJobPopPayload( post_processing=[KNOWN_UPSCALERS.RealESRGAN_x2plus], prompt="A cat in a hat", @@ -689,11 +689,11 @@ async def test_ImageGenerateJobPop_download_addtl_data() -> None: assert downloaded_extra_source_images[1].strength == 2.0 -def test_AlchemyPopResponse() -> None: - test_alchemy_pop_response = AlchemyPopResponse( +def test_AlchemyJobPopResponse() -> None: + test_alchemy_pop_response = AlchemyJobPopResponse( forms=[ AlchemyPopFormPayload( - id=JobID(root=UUID("00000000-0000-0000-0000-000000000000")), + id=GenerationID(root=UUID("00000000-0000-0000-0000-000000000000")), form=KNOWN_ALCHEMY_TYPES.RealESRGAN_x2plus, r2_upload="r2 download link", source_image="r2 download link", @@ -702,7 +702,7 @@ def test_AlchemyPopResponse() -> None: ) assert test_alchemy_pop_response.forms is not None - assert test_alchemy_pop_response.forms[0].id_ == JobID(root=UUID("00000000-0000-0000-0000-000000000000")) + assert test_alchemy_pop_response.forms[0].id_ == GenerationID(root=UUID("00000000-0000-0000-0000-000000000000")) assert test_alchemy_pop_response.forms[0].form == KNOWN_ALCHEMY_TYPES.RealESRGAN_x2plus assert test_alchemy_pop_response.forms[0].r2_upload == "r2 download link" assert test_alchemy_pop_response.forms[0].source_image == "r2 download link" @@ -711,16 +711,16 @@ def test_AlchemyPopResponse() -> None: assert test_alchemy_pop_response in container - test_alchemy_pop_response_multiple_forms = AlchemyPopResponse( + test_alchemy_pop_response_multiple_forms = AlchemyJobPopResponse( forms=[ AlchemyPopFormPayload( - id=JobID(root=UUID("00000000-0000-0000-0000-000000000010")), + id=GenerationID(root=UUID("00000000-0000-0000-0000-000000000010")), form=KNOWN_ALCHEMY_TYPES.RealESRGAN_x2plus, r2_upload="r2 download link", source_image="r2 download link", ), AlchemyPopFormPayload( - id=JobID(root=UUID("00000000-0000-0000-0000-000000000020")), + id=GenerationID(root=UUID("00000000-0000-0000-0000-000000000020")), form=KNOWN_ALCHEMY_TYPES.fourx_AnimeSharp, r2_upload="r2 download link", 
source_image="r2 download link", @@ -728,16 +728,16 @@ def test_AlchemyPopResponse() -> None: ], ) - test_alchemy_pop_response_multiple_forms_copy = AlchemyPopResponse( + test_alchemy_pop_response_multiple_forms_copy = AlchemyJobPopResponse( forms=[ AlchemyPopFormPayload( - id=JobID(root=UUID("00000000-0000-0000-0000-000000000020")), + id=GenerationID(root=UUID("00000000-0000-0000-0000-000000000020")), form=KNOWN_ALCHEMY_TYPES.RealESRGAN_x2plus, r2_upload="r2 download link", source_image="r2 download link", ), AlchemyPopFormPayload( - id=JobID(root=UUID("00000000-0000-0000-0000-000000000010")), + id=GenerationID(root=UUID("00000000-0000-0000-0000-000000000010")), form=KNOWN_ALCHEMY_TYPES.fourx_AnimeSharp, r2_upload="r2 download link", source_image="r2 download link", diff --git a/tests/ai_horde_api/test_ai_horde_generate_api_calls.py b/tests/ai_horde_api/test_ai_horde_generate_api_calls.py index 1daaa707..e83a7737 100644 --- a/tests/ai_horde_api/test_ai_horde_generate_api_calls.py +++ b/tests/ai_horde_api/test_ai_horde_generate_api_calls.py @@ -11,7 +11,6 @@ AIHordeAPISimpleClient, ) from horde_sdk.ai_horde_api.apimodels import ( - KNOWN_ALCHEMY_TYPES, AlchemyAsyncRequest, AlchemyAsyncRequestFormItem, AlchemyStatusResponse, @@ -24,13 +23,16 @@ ) from horde_sdk.ai_horde_api.apimodels.base import ExtraSourceImageEntry from horde_sdk.ai_horde_api.consts import ( + POST_PROCESSOR_ORDER_TYPE, +) +from horde_sdk.ai_horde_api.fields import GenerationID +from horde_sdk.generation_parameters.alchemy.consts import ( + KNOWN_ALCHEMY_TYPES, KNOWN_FACEFIXERS, KNOWN_MISC_POST_PROCESSORS, - KNOWN_SOURCE_PROCESSING, KNOWN_UPSCALERS, - POST_PROCESSOR_ORDER_TYPE, ) -from horde_sdk.ai_horde_api.fields import JobID +from horde_sdk.generation_parameters.image.consts import KNOWN_IMAGE_SOURCE_PROCESSING from horde_sdk.generic_api.apimodels import RequestErrorResponse @@ -49,11 +51,11 @@ async def submit_request( ) -> ImageGenerateAsyncResponse | RequestErrorResponse | None: await asyncio.sleep(delay) async with AIHordeAPIAsyncClientSession(aiohttp_session) as horde_session: - api_response: ImageGenerateAsyncResponse | RequestErrorResponse = ( - await horde_session.submit_request( - simple_image_gen_request, - simple_image_gen_request.get_default_success_response_type(), - ) + api_response: ( + ImageGenerateAsyncResponse | RequestErrorResponse + ) = await horde_session.submit_request( + simple_image_gen_request, + simple_image_gen_request.get_default_success_response_type(), ) return api_response return None @@ -75,7 +77,7 @@ def test_simple_client_image_generate( """Test that a simple image generation request can be submitted and cancelled.""" simple_client = AIHordeAPISimpleClient() - image_generate_status_respons, job_id = simple_client.image_generate_request(simple_image_gen_request) + image_generate_status_respons, _gen_id = simple_client.image_generate_request(simple_image_gen_request) if isinstance(image_generate_status_respons.generations, RequestErrorResponse): raise AssertionError(image_generate_status_respons.generations.message) @@ -104,7 +106,7 @@ def test_simple_client_image_generate_with_post_process( models=["Deliberate"], ) - image_generate_status_respons, job_id = simple_client.image_generate_request(pp_image_gen_request) + image_generate_status_respons, _gen_id = simple_client.image_generate_request(pp_image_gen_request) if isinstance(image_generate_status_respons.generations, RequestErrorResponse): raise AssertionError(image_generate_status_respons.generations.message) @@ -138,7 +140,7 @@ def 
test_simple_client_image_generate_with_post_process_costly_order( models=["Deliberate"], ) - image_generate_status_respons, job_id = simple_client.image_generate_request(pp_image_gen_request) + image_generate_status_respons, _gen_id = simple_client.image_generate_request(pp_image_gen_request) if isinstance(image_generate_status_respons.generations, RequestErrorResponse): raise AssertionError(image_generate_status_respons.generations.message) @@ -172,7 +174,7 @@ def test_simple_client_image_generate_with_post_process_fix_costly_order( models=["Deliberate"], ) - image_generate_status_respons, job_id = simple_client.image_generate_request(pp_image_gen_request) + image_generate_status_respons, _gen_id = simple_client.image_generate_request(pp_image_gen_request) if isinstance(image_generate_status_respons.generations, RequestErrorResponse): raise AssertionError(image_generate_status_respons.generations.message) @@ -189,7 +191,7 @@ def test_simple_client_image_generate_no_apikey_specified( """Test that a simple image generation request can be submitted and cancelled when no API key is specified.""" simple_client = AIHordeAPISimpleClient() - image_generate_status_respons, job_id = simple_client.image_generate_request( + image_generate_status_respons, _gen_id = simple_client.image_generate_request( ImageGenerateAsyncRequest( prompt="a cat in a hat", params=ImageGenerationInputPayload( @@ -225,7 +227,7 @@ def test_simple_client_image_generate_loras( simple_client = AIHordeAPISimpleClient() - image_generate_status_respons, job_id = simple_client.image_generate_request( + image_generate_status_respons, _gen_id = simple_client.image_generate_request( lora_image_gen_request, ) @@ -244,7 +246,7 @@ async def test_simple_client_async_image_generate( async with aiohttp.ClientSession() as aiohttp_session: simple_client = AIHordeAPIAsyncSimpleClient(aiohttp_session) - image_generate_status_respons, job_id = await simple_client.image_generate_request( + image_generate_status_respons, _gen_id = await simple_client.image_generate_request( simple_image_gen_request, ) @@ -261,7 +263,7 @@ def test_simple_client_image_generate_multiple_n( """Test that a batch of image generation requests can be submitted and cancelled.""" simple_client = AIHordeAPISimpleClient() - image_generate_status_respons, job_id = simple_client.image_generate_request(simple_image_gen_n_requests) + image_generate_status_respons, _gen_id = simple_client.image_generate_request(simple_image_gen_n_requests) assert simple_image_gen_n_requests.params is not None assert len(image_generate_status_respons.generations) == simple_image_gen_n_requests.params.n @@ -278,7 +280,7 @@ def test_simple_client_image_generate_multiple_requests( simple_client = AIHordeAPISimpleClient() for _ in range(5): - image_generate_status_respons, job_id = simple_client.image_generate_request(simple_image_gen_request) + image_generate_status_respons, _gen_id = simple_client.image_generate_request(simple_image_gen_request) if isinstance(image_generate_status_respons.generations, RequestErrorResponse): raise AssertionError(image_generate_status_respons.generations.message) @@ -295,7 +297,7 @@ def test_simple_client_alchemy_basic( ) -> None: simple_client = AIHordeAPISimpleClient() - result, jobid = simple_client.alchemy_request( + result, _GenerationID = simple_client.alchemy_request( alchemy_request=AlchemyAsyncRequest( forms=[ AlchemyAsyncRequestFormItem( @@ -322,7 +324,7 @@ async def test_simple_client_async_alchemy_basic_flood( simple_client = 
AIHordeAPIAsyncSimpleClient(aiohttp_session) async def submit_request() -> AlchemyStatusResponse: - result, jobid = await simple_client.alchemy_request( + result, _GenerationID = await simple_client.alchemy_request( alchemy_request=AlchemyAsyncRequest( forms=[ AlchemyAsyncRequestFormItem( @@ -357,7 +359,7 @@ async def test_simple_client_async_image_generate_multiple( async with aiohttp.ClientSession() as aiohttp_session: simple_client = AIHordeAPIAsyncSimpleClient(aiohttp_session) - image_generate_status_response, job_id = await simple_client.image_generate_request( + image_generate_status_response, _gen_id = await simple_client.image_generate_request( simple_image_gen_n_requests, ) @@ -381,7 +383,7 @@ async def test_simple_client_async_image_generate_multiple_with_timeout( async with aiohttp.ClientSession() as aiohttp_session: simple_client = AIHordeAPIAsyncSimpleClient(aiohttp_session) - image_generate_status_response, job_id = await simple_client.image_generate_request( + image_generate_status_response, _gen_id = await simple_client.image_generate_request( simple_image_gen_n_requests, timeout=7, # 7 seconds isn't (generally) going to be enough time for 3 generations to complete ) @@ -405,20 +407,20 @@ async def test_multiple_concurrent_async_requests_cancel_single_task( async with aiohttp.ClientSession() as aiohttp_session: simple_client = AIHordeAPIAsyncSimpleClient(aiohttp_session) - async def _submit_request(delay: int) -> tuple[ImageGenerateStatusResponse, JobID] | None: + async def _submit_request(delay: int) -> tuple[ImageGenerateStatusResponse, GenerationID] | None: try: await asyncio.sleep(delay) - image_generate_status_response, job_id = await simple_client.image_generate_request( + image_generate_status_response, gen_id = await simple_client.image_generate_request( simple_image_gen_request, timeout=-1, ) - return image_generate_status_response, job_id + return image_generate_status_response, gen_id except asyncio.CancelledError: return None # Run 5 concurrent requests using asyncio tasks = [asyncio.create_task(_submit_request(delay=delay)) for delay in range(5)] - all_generations: list[tuple[ImageGenerateStatusResponse, JobID] | None] = await asyncio.gather( + all_generations: list[tuple[ImageGenerateStatusResponse, GenerationID] | None] = await asyncio.gather( *tasks, self.delayed_cancel(tasks[0]), ) @@ -437,7 +439,7 @@ async def test_multiple_concurrent_async_requests_cancel_all_tasks( async def submit_request(delay: int) -> ImageGenerateStatusResponse | None: try: await asyncio.sleep(delay) - image_generate_status_response, job_id = await simple_client.image_generate_request( + image_generate_status_response, _gen_id = await simple_client.image_generate_request( simple_image_gen_request, timeout=-1, ) @@ -468,7 +470,7 @@ def example_callback(generation: ImageGenerateCheckResponse) -> None: print(f"Callback: {generation}") assert generation - image_generate_status_response, job_id = await simple_client.image_generate_request( + image_generate_status_response, _gen_id = await simple_client.image_generate_request( simple_image_gen_request, check_callback=example_callback, ) @@ -495,7 +497,7 @@ def check_callback(response: ImageGenerateCheckResponse) -> None: logger.debug(f"Response: {response}") raise KeyboardInterrupt("Test KeyboardInterrupt") - image_generate_status_response, job_id = await simple_client.image_generate_request( + _image_generate_status_response, _gen_id = await simple_client.image_generate_request( simple_image_gen_request, check_callback=check_callback, ) 
@@ -512,7 +514,7 @@ def example_callback(generation: AlchemyStatusResponse) -> None: print(f"Callback: {generation}") assert generation - result, jobid = await simple_client.alchemy_request( + result, _GenerationID = await simple_client.alchemy_request( alchemy_request=AlchemyAsyncRequest( forms=[ AlchemyAsyncRequestFormItem( @@ -538,7 +540,7 @@ def bad_callback() -> None: pass with pytest.raises(ValueError, match="Callback"): - image_generate_status_response, job_id = await simple_client.image_generate_request( + _image_generate_status_response, _gen_id = await simple_client.image_generate_request( simple_image_gen_request, check_callback=bad_callback, # type: ignore ) @@ -555,7 +557,7 @@ def bad_callback() -> None: pass with pytest.raises(ValueError, match="Callback"): - result, jobid = await simple_client.alchemy_request( + _result, _GenerationID = await simple_client.alchemy_request( alchemy_request=AlchemyAsyncRequest( forms=[ AlchemyAsyncRequestFormItem( @@ -594,7 +596,7 @@ async def test_remix( seed="1234", n=n, ), - source_processing=KNOWN_SOURCE_PROCESSING.remix, + source_processing=KNOWN_IMAGE_SOURCE_PROCESSING.remix, models=["Stable Cascade 1.0"], source_image=woman_headshot_testing_image_base64, extra_source_images=[ @@ -609,9 +611,9 @@ async def test_remix( assert len(response[0].generations) == n for generation in response[0].generations: - image, job_id = await simple_client.download_image_from_generation(generation) + image, gen_id = await simple_client.download_image_from_generation(generation) assert image is not None - image.save(f"tests/testing_result_images/remix_woman_default_{job_id}.webp") + image.save(f"tests/testing_result_images/remix_woman_default_{gen_id}.webp") response = await asyncio.create_task( simple_client.image_generate_request( @@ -623,7 +625,7 @@ async def test_remix( seed="1234", n=n, ), - source_processing=KNOWN_SOURCE_PROCESSING.remix, + source_processing=KNOWN_IMAGE_SOURCE_PROCESSING.remix, models=["Stable Cascade 1.0"], source_image=default_testing_image_base64, extra_source_images=[ @@ -638,6 +640,6 @@ async def test_remix( assert len(response[0].generations) == n for generation in response[0].generations: - image, job_id = await simple_client.download_image_from_generation(generation) + image, gen_id = await simple_client.download_image_from_generation(generation) assert image is not None - image.save(f"tests/testing_result_images/remix_default_woman_{job_id}.webp") + image.save(f"tests/testing_result_images/remix_default_woman_{gen_id}.webp") diff --git a/tests/ai_horde_api/test_ai_horde_generate_text_api_calls.py b/tests/ai_horde_api/test_ai_horde_generate_text_api_calls.py index 5877dd8c..50cafead 100644 --- a/tests/ai_horde_api/test_ai_horde_generate_text_api_calls.py +++ b/tests/ai_horde_api/test_ai_horde_generate_text_api_calls.py @@ -11,7 +11,7 @@ TextGenerateAsyncRequest, TextGenerateStatusResponse, ) -from horde_sdk.ai_horde_api.fields import JobID +from horde_sdk.ai_horde_api.fields import GenerationID class TestAIHordeTextGenerate: @@ -24,11 +24,11 @@ def test_text_generate(self) -> None: ], ) - response, job_id = simple_client.text_generate_request(request) + response, gen_id = simple_client.text_generate_request(request) - logger.debug(f"{job_id}: {response}") + logger.debug(f"{gen_id}: {response}") - assert isinstance(job_id, JobID) + assert isinstance(gen_id, GenerationID) assert isinstance(response, TextGenerateStatusResponse) assert len(response.generations) == 1 @@ -54,14 +54,14 @@ async def test_text_generate_async(self) -> 
None: ], ) - response, job_id = await simple_client.text_generate_request( + response, gen_id = await simple_client.text_generate_request( request, check_callback=lambda response: logger.debug(f"Response: {response}"), ) - logger.debug(f"{job_id}: {response}") + logger.debug(f"{gen_id}: {response}") - assert isinstance(job_id, JobID) + assert isinstance(gen_id, GenerationID) assert isinstance(response, TextGenerateStatusResponse) assert len(response.generations) == 1 diff --git a/tests/ai_horde_api/test_ai_horde_stats_api_calls.py b/tests/ai_horde_api/test_ai_horde_stats_api_calls.py index 8ce98e1d..6242e2c5 100644 --- a/tests/ai_horde_api/test_ai_horde_stats_api_calls.py +++ b/tests/ai_horde_api/test_ai_horde_stats_api_calls.py @@ -5,7 +5,7 @@ from horde_sdk.ai_horde_api.ai_horde_clients import ( AIHordeAPIAsyncClientSession, ) -from horde_sdk.ai_horde_api.apimodels._stats import ( +from horde_sdk.ai_horde_api.apimodels import ( ImageStatsModelsRequest, ImageStatsModelsResponse, ImageStatsModelsTotalRequest, diff --git a/tests/ai_horde_api/test_ai_horde_status_api_calls.py b/tests/ai_horde_api/test_ai_horde_status_api_calls.py index 14e7eee8..8e459bff 100644 --- a/tests/ai_horde_api/test_ai_horde_status_api_calls.py +++ b/tests/ai_horde_api/test_ai_horde_status_api_calls.py @@ -5,7 +5,7 @@ from horde_sdk.ai_horde_api.ai_horde_clients import ( AIHordeAPIAsyncClientSession, ) -from horde_sdk.ai_horde_api.apimodels._status import ( +from horde_sdk.ai_horde_api.apimodels import ( ActiveModel, AIHordeHeartbeatRequest, AIHordeHeartbeatResponse, diff --git a/tests/ai_horde_api/test_ai_worker_roundtrip_api_calls.py b/tests/ai_horde_api/test_ai_worker_roundtrip_api_calls.py index 2a6fb8d5..2de6b4f5 100644 --- a/tests/ai_horde_api/test_ai_worker_roundtrip_api_calls.py +++ b/tests/ai_horde_api/test_ai_worker_roundtrip_api_calls.py @@ -21,7 +21,7 @@ from horde_sdk.ai_horde_api.consts import ( GENERATION_STATE, ) -from horde_sdk.ai_horde_api.fields import JobID +from horde_sdk.ai_horde_api.fields import GenerationID class TestImageWorkerRoundtrip: @@ -159,10 +159,10 @@ async def test_basic_image_roundtrip(self, simple_image_gen_request: ImageGenera await asyncio.gather(image_gen_task, fake_worker_task) - image_gen_response, job_id = image_gen_task.result() + image_gen_response, gen_id = image_gen_task.result() assert isinstance(image_gen_response, ImageGenerateStatusResponse) - assert isinstance(job_id, JobID) + assert isinstance(gen_id, GenerationID) assert len(image_gen_response.generations) == 1 diff --git a/tests/ai_horde_api/test_dynamically_validate_against_swagger.py b/tests/ai_horde_api/test_dynamically_validate_against_swagger.py index 8d4726dd..5678209c 100644 --- a/tests/ai_horde_api/test_dynamically_validate_against_swagger.py +++ b/tests/ai_horde_api/test_dynamically_validate_against_swagger.py @@ -55,7 +55,6 @@ def get_fields_descriptions_and_types(class_type: type[BaseModel]) -> dict[str, return field_names_and_descriptions -@pytest.mark.object_verify def all_ai_horde_model_defs_in_swagger(swagger_doc: SwaggerDoc) -> None: """Ensure all models defined in ai_horde_api are defined in the swagger doc.""" all_request_types: list[type[HordeRequest]] = get_all_request_types(horde_sdk.ai_horde_api.apimodels.__name__) @@ -79,6 +78,8 @@ def all_ai_horde_model_defs_in_swagger(swagger_doc: SwaggerDoc) -> None: default_num_request_fields = len(HordeRequest.model_fields) + sdk_defined_endpoint_verbs: dict[str, list[HTTPMethod]] = {} + for request_type in all_request_types: endpoint_subpath: 
GENERIC_API_ENDPOINT_SUBPATH = request_type.get_api_endpoint_subpath() assert endpoint_subpath, f"Failed to get endpoint subpath for {request_type.__name__}" @@ -89,6 +90,11 @@ def all_ai_horde_model_defs_in_swagger(swagger_doc: SwaggerDoc) -> None: assert endpoint_subpath in swagger_doc.paths, f"Endpoint {endpoint_subpath} not found in the swagger" swagger_endpoint: SwaggerEndpoint = swagger_doc.paths[endpoint_subpath] + if endpoint_subpath not in sdk_defined_endpoint_verbs: + sdk_defined_endpoint_verbs[endpoint_subpath] = [] + + sdk_defined_endpoint_verbs[endpoint_subpath].append(request_type.get_http_method()) + # Check if the HTTP method used by the request type is defined in the Swagger documentation assert swagger_endpoint.get_endpoint_method_from_http_method(request_type.get_http_method()) is not None @@ -112,15 +118,15 @@ def all_ai_horde_model_defs_in_swagger(swagger_doc: SwaggerDoc) -> None: "Consider giving it a unique name on the API.", ) else: - assert ( - request_type.get_api_model_name() in swagger_defined_models - ), f"Model is defined in horde_sdk, but not in swagger: {request_type.get_api_model_name()}" + assert request_type.get_api_model_name() in swagger_defined_models, ( + f"Model is defined in horde_sdk, but not in swagger: {request_type.get_api_model_name()}" + ) assert endpoint_subpath in swagger_doc.paths, f"Missing {request_type.__name__} in swagger" - assert ( - endpoint_subpath in swagger_defined_payload_examples - ), f"Missing {request_type.__name__} in swagger examples" + assert endpoint_subpath in swagger_defined_payload_examples, ( + f"Missing {request_type.__name__} in swagger examples" + ) endpoint_http_status_code_responses: dict[HTTPStatusCode, dict[str, object] | list[Any]] | None | None = None @@ -149,18 +155,18 @@ def all_ai_horde_model_defs_in_swagger(swagger_doc: SwaggerDoc) -> None: for success_code in get_all_success_status_codes() if success_code in endpoint_http_status_code_responses ] - assert ( - len(endpoint_success_http_status_codes) > 0 - ), f"Failed to find any success status codes in {request_type.__name__}" + assert len(endpoint_success_http_status_codes) > 0, ( + f"Failed to find any success status codes in {request_type.__name__}" + ) for success_code in endpoint_success_http_status_codes: - assert ( - success_code in request_type.get_success_status_response_pairs() - ), f"Missing success response type for {request_type.__name__} with status code {success_code}" + assert success_code in request_type.get_success_status_response_pairs(), ( + f"Missing success response type for {request_type.__name__} with status code {success_code}" + ) else: - assert ( - request_type.get_default_success_response_type() is not None - ), f"Failed to get default success response type for {request_type.__name__}" + assert request_type.get_default_success_response_type() is not None, ( + f"Failed to get default success response type for {request_type.__name__}" + ) api_to_sdk_response_model_map[endpoint_subpath] = request_type.get_success_status_response_pairs() @@ -172,6 +178,31 @@ def all_ai_horde_model_defs_in_swagger(swagger_doc: SwaggerDoc) -> None: response_field_dict = get_fields_descriptions_and_types(response_type) response_field_names_and_descriptions[response_type.__name__] = response_field_dict + endpoint_verbs_missing_from_sdk: dict[str, list[HTTPMethod]] = {} + endpoint_verbs_missing_from_swagger: dict[str, list[HTTPMethod]] = {} + + for sdk_endpoint_subpath, sdk_endpoint_verbs in sdk_defined_endpoint_verbs.items(): + all_swagger_endpoint_verbs 
= swagger_doc.get_all_verbs_for_endpoint(sdk_endpoint_subpath) + + # Identify verbs missing from SDK + missing_from_sdk = [verb for verb in all_swagger_endpoint_verbs if verb not in sdk_endpoint_verbs] + if missing_from_sdk: + endpoint_verbs_missing_from_sdk[sdk_endpoint_subpath] = missing_from_sdk + + # Identify verbs missing from Swagger + missing_from_swagger = [verb for verb in sdk_endpoint_verbs if verb not in all_swagger_endpoint_verbs] + if missing_from_swagger: + endpoint_verbs_missing_from_swagger[sdk_endpoint_subpath] = missing_from_swagger + + assert not endpoint_verbs_missing_from_sdk, ( + "The following endpoints are defined in the Swagger documentation but not in the SDK: " + f"{endpoint_verbs_missing_from_sdk}" + ) + assert not endpoint_verbs_missing_from_swagger, ( + "The following endpoints are defined in the SDK but not in the Swagger documentation: " + f"{endpoint_verbs_missing_from_swagger}" + ) + def json_serializer(obj: object) -> object: if isinstance(obj, str): return obj @@ -180,19 +211,19 @@ def json_serializer(obj: object) -> object: return obj.__module__ + "." + obj.__name__ raise TypeError(f"Object of type {type(obj)} is not JSON serializable") - with open("docs/api_to_sdk_payload_map.json", "w") as f: + with open("docs/ai-horde/api_to_sdk_payload_map.json", "w") as f: f.write(json.dumps(api_to_sdk_payload_model_map, indent=4, default=json_serializer)) f.write("\n") - with open("docs/api_to_sdk_response_map.json", "w") as f: + with open("docs/ai-horde/api_to_sdk_response_map.json", "w") as f: f.write(json.dumps(api_to_sdk_response_model_map, indent=4, default=json_serializer)) f.write("\n") - with open("docs/request_field_names_and_descriptions.json", "w") as f: + with open("docs/ai-horde/request_field_names_and_descriptions.json", "w") as f: f.write(json.dumps(request_field_names_and_descriptions, indent=4, default=json_serializer)) f.write("\n") - with open("docs/response_field_names_and_descriptions.json", "w") as f: + with open("docs/ai-horde/response_field_names_and_descriptions.json", "w") as f: f.write(json.dumps(response_field_names_and_descriptions, indent=4, default=json_serializer)) f.write("\n") diff --git a/tests/ai_horde_api/test_shared_keys_api_calls.py b/tests/ai_horde_api/test_shared_keys_api_calls.py new file mode 100644 index 00000000..86007d57 --- /dev/null +++ b/tests/ai_horde_api/test_shared_keys_api_calls.py @@ -0,0 +1,21 @@ +def test_check_redis_cache_refreshed_on_shared_key_modified() -> None: + """From the PR description: + Redis cache is now refreshed when User is modified. + + Problem: + After creating or deleting a shared key, it is not immediately reflected on /get_user endpoint as it serves + now stale cached version. + + This adds functionality to bust the user cache - both by id and by api_key - ensuring the cached results + are always fresh. 
+ + How to test: + call GET /api/v2/find_user, you should see [] under sharedkey_ids + call GET /api/v2/users/:user_id, you should see [] under sharedkey_ids + call PUT /api/v2/sharedkeys with body {} + call GET /api/v2/find_user again, you should see ['5d81849a...'] under sharedkey_ids + call GET /api/v2/users/:user_id again, you should also see ['5d81849a...'] under sharedkey_ids + call DELETE /api/v2/sharedkeys/:keyId + call GET /api/v2/find_user, you should see [] under sharedkey_ids + call GET /api/v2/users/:user_id, you should see [] under sharedkey_ids + """ diff --git a/tests/ai_horde_api/test_utils.py b/tests/ai_horde_api/test_utils.py new file mode 100644 index 00000000..32fe4e49 --- /dev/null +++ b/tests/ai_horde_api/test_utils.py @@ -0,0 +1,52 @@ +from horde_sdk.utils import seed_to_int + + +class TestSeedToInt: + def test_convert_integer_seed_to_integer(self) -> None: + """Test converting an integer seed to an integer.""" + seed = 12345 + result = seed_to_int(seed) + assert result == seed, f"Expected {seed}, got {result}" + + def test_handle_non_digit_string_seed(self) -> None: + """Handle non-digit string seed.""" + seed = "abc" + result = seed_to_int(seed) + expected = abs(int.from_bytes(seed.encode(), "little")) % (2**32) + assert result == expected, f"Expected {expected}, got {result}" + + def test_return_random_integer_when_seed_is_none(self) -> None: + """Return random integer when seed is None.""" + result = seed_to_int(None) + assert isinstance(result, int), f"Expected an integer, got {type(result)}" + + def test_random_integer_for_empty_string_seed(self) -> None: + """Return random integer when seed is an empty string.""" + result = seed_to_int("") + assert isinstance(result, int), f"Expected an integer, got {type(result)}" + + def test_negative_integer_seed(self) -> None: + """Handle negative integer seed.""" + seed = -12345 + result = seed_to_int(seed) + assert result == seed, f"Expected {seed}, got {result}" + + def test_consistent_output_for_same_string_seed(self) -> None: + """Ensure consistent output for same string seed.""" + seed = "test_seed" + result1 = seed_to_int(seed) + result2 = seed_to_int(seed) + assert result1 == result2, f"Expected consistent output, got {result1} and {result2}" + + def test_maximum_integer_seed(self) -> None: + """Test with maximum integer value.""" + max_int = (2**32) - 1 + result = seed_to_int(max_int) + assert result == max_int, f"Expected {max_int}, got {result}" + + def test_arbitrary_string_seed(self) -> None: + """Test with an arbitrary string seed.""" + seed = "test_seed" + result = seed_to_int(seed) + assert isinstance(result, int), f"Expected an integer, got {type(result)}" + assert result == 1953719668, f"Expected 1953719668, got {result}" diff --git a/tests/ai_horde_worker/__init__.py b/tests/ai_horde_worker/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/ai_horde_worker/test_model_meta_api_calls.py b/tests/ai_horde_worker/test_model_meta_api_calls.py index 34c9d5dd..01c8e0e0 100644 --- a/tests/ai_horde_worker/test_model_meta_api_calls.py +++ b/tests/ai_horde_worker/test_model_meta_api_calls.py @@ -1,10 +1,9 @@ import pytest -from horde_model_reference.model_reference_manager import ModelReferenceManager from horde_sdk.ai_horde_api.ai_horde_clients import AIHordeAPIManualClient from horde_sdk.ai_horde_api.apimodels import ImageStatsModelsRequest, ImageStatsModelsResponse, StatsModelsTimeframe -from horde_sdk.ai_horde_worker.model_meta import ImageModelLoadResolver from 
horde_sdk.generic_api.apimodels import RequestErrorResponse +from horde_sdk.worker.model_meta import ImageModelLoadResolver @pytest.fixture(scope="session") @@ -20,8 +19,18 @@ def stats_response() -> ImageStatsModelsResponse: @pytest.fixture(scope="session") -def image_model_load_resolver() -> ImageModelLoadResolver: - return ImageModelLoadResolver(ModelReferenceManager()) +def stats_response_known_models() -> ImageStatsModelsResponse: + client = AIHordeAPIManualClient() + + stats_response = client.submit_request( + ImageStatsModelsRequest(model_state="known"), + ImageStatsModelsResponse, + ) + + if isinstance(stats_response, RequestErrorResponse): + raise Exception(f"Request error: {stats_response.message}. object_data: {stats_response.object_data}") + + return stats_response def test_image_model_load_resolver_all(image_model_load_resolver: ImageModelLoadResolver) -> None: @@ -60,6 +69,24 @@ def test_image_model_load_resolver_all_without_large(image_model_load_resolver: assert len(all_model_names) > len(all_model_names_without_large) +def test_removed_model_not_in_top_or_all( + image_model_load_resolver: ImageModelLoadResolver, + stats_response_known_models: ImageStatsModelsResponse, +) -> None: + removed_model_name = "Realisian" + + all_model_names = image_model_load_resolver.resolve_all_model_names() + assert removed_model_name not in all_model_names + + resolved_model_names = image_model_load_resolver.resolve_top_n_model_names( + len(all_model_names), + stats_response_known_models, + timeframe=StatsModelsTimeframe.month, + ) + + assert removed_model_name not in resolved_model_names + + def test_image_model_load_resolver_top_n( image_model_load_resolver: ImageModelLoadResolver, stats_response: ImageStatsModelsResponse, diff --git a/tests/backend_parsing/__init__.py b/tests/backend_parsing/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/backend_parsing/test_backend_mapper.py b/tests/backend_parsing/test_backend_mapper.py new file mode 100644 index 00000000..c91853b3 --- /dev/null +++ b/tests/backend_parsing/test_backend_mapper.py @@ -0,0 +1,67 @@ +from horde_sdk.backend_parsing.image.comfyui.hordelib import ( + KNOWN_COMFYUI_CONTROLNETS, + KNOWN_COMFYUI_IMAGE_SAMPLERS, + KNOWN_COMFYUI_IMAGE_SCHEDULERS, + ComfyUIBackendValuesMapper, +) +from horde_sdk.backend_parsing.object_models import ImageBackendValuesMapper +from horde_sdk.generation_parameters.image.consts import ( + KNOWN_IMAGE_CONTROLNETS, + KNOWN_IMAGE_SAMPLERS, + KNOWN_IMAGE_SCHEDULERS, +) + + +def test_comfyui_backend_values_mapper_init() -> None: + """Test the ComfyUIBackendValuesMapper class.""" + mapper = ComfyUIBackendValuesMapper() + assert isinstance(mapper, ImageBackendValuesMapper) + + +def test_comfyui_backend_values_mapper_accurate() -> None: + """Test the ComfyUIBackendValuesMapper class.""" + mapper = ComfyUIBackendValuesMapper() + assert isinstance(mapper._to_sdk_sampler_map, dict) + assert isinstance(mapper._to_backend_sampler_map, dict) + assert len(mapper._to_sdk_sampler_map) > 0 + assert len(mapper._to_backend_sampler_map) > 0 + + for sdk_sampler_key, sdk_sampler_value in mapper._to_sdk_sampler_map.items(): + assert isinstance(sdk_sampler_key, str) + assert isinstance(sdk_sampler_value, KNOWN_IMAGE_SAMPLERS) + + for backend_sampler_key, backend_sampler_value in mapper._to_backend_sampler_map.items(): + assert isinstance(backend_sampler_key, KNOWN_IMAGE_SAMPLERS) + assert isinstance(backend_sampler_value, str) + + assert len(mapper._to_sdk_sampler_map) == 
len(mapper._to_backend_sampler_map) + + assert mapper.map_to_backend_sampler(KNOWN_IMAGE_SAMPLERS.k_lms) == KNOWN_COMFYUI_IMAGE_SAMPLERS.lms + assert mapper.map_to_backend_sampler("k_lms") == KNOWN_COMFYUI_IMAGE_SAMPLERS.lms + assert mapper.map_to_backend_sampler("lms") == "lms" + assert mapper.map_to_backend_sampler("lms") == KNOWN_COMFYUI_IMAGE_SAMPLERS.lms + + assert mapper.map_to_backend_scheduler(KNOWN_IMAGE_SCHEDULERS.simple) == KNOWN_COMFYUI_IMAGE_SCHEDULERS.simple + assert mapper.map_to_backend_scheduler("simple") == KNOWN_COMFYUI_IMAGE_SCHEDULERS.simple + assert mapper.map_to_backend_scheduler("simple") == "simple" + assert ( + mapper.map_to_backend_scheduler(KNOWN_COMFYUI_IMAGE_SCHEDULERS.simple) == KNOWN_COMFYUI_IMAGE_SCHEDULERS.simple + ) + + assert mapper.map_to_backend_controlnet(KNOWN_IMAGE_CONTROLNETS.canny) == KNOWN_COMFYUI_CONTROLNETS.canny + assert mapper.map_to_backend_controlnet("canny") == KNOWN_COMFYUI_CONTROLNETS.canny + + assert mapper.map_to_sdk_sampler(KNOWN_COMFYUI_IMAGE_SAMPLERS.lms) == KNOWN_IMAGE_SAMPLERS.k_lms + assert mapper.map_to_sdk_sampler("lms") == KNOWN_IMAGE_SAMPLERS.k_lms + assert mapper.map_to_sdk_sampler("k_lms") == "k_lms" + assert mapper.map_to_sdk_sampler(KNOWN_IMAGE_SAMPLERS.k_lms) == KNOWN_IMAGE_SAMPLERS.k_lms + + assert mapper.map_to_sdk_scheduler(KNOWN_COMFYUI_IMAGE_SCHEDULERS.simple) == KNOWN_IMAGE_SCHEDULERS.simple + assert mapper.map_to_sdk_scheduler("simple") == KNOWN_IMAGE_SCHEDULERS.simple + assert mapper.map_to_sdk_scheduler("simple") == "simple" + assert mapper.map_to_sdk_scheduler(KNOWN_IMAGE_SCHEDULERS.simple) == KNOWN_IMAGE_SCHEDULERS.simple + + assert mapper.map_to_sdk_controlnet(KNOWN_COMFYUI_CONTROLNETS.canny) == KNOWN_IMAGE_CONTROLNETS.canny + assert mapper.map_to_sdk_controlnet("canny") == KNOWN_IMAGE_CONTROLNETS.canny + assert mapper.map_to_sdk_controlnet("canny") == "canny" + assert mapper.map_to_sdk_controlnet(KNOWN_IMAGE_CONTROLNETS.canny) == "canny" diff --git a/tests/conftest.py b/tests/conftest.py index 1e348b21..edfc8330 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,19 +1,112 @@ import asyncio import base64 +import functools +import io import os import pathlib import sys +from collections.abc import Callable +from typing import Final +from uuid import UUID -import pytest -from loguru import logger +# We have to do these early so any other libraries use these settings os.environ["TESTS_ONGOING"] = "1" if sys.platform == "win32": asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) -from horde_sdk.ai_horde_api.apimodels import ImageGenerateAsyncRequest, ImageGenerationInputPayload + +import PIL.Image +import pytest +from horde_model_reference.meta_consts import KNOWN_IMAGE_GENERATION_BASELINE +from horde_model_reference.model_reference_manager import ModelReferenceManager +from loguru import logger + +from horde_sdk.ai_horde_api.apimodels import ( + AlchemyJobPopResponse, + AlchemyPopFormPayload, + ExtraSourceImageEntry, + ImageGenerateAsyncRequest, + ImageGenerateJobPopPayload, + ImageGenerateJobPopResponse, + ImageGenerateJobPopSkippedStatus, + ImageGenerationInputPayload, + ModelPayloadKobold, + NoValidAlchemyFound, + NoValidRequestFoundKobold, + TextGenerateJobPopResponse, +) +from horde_sdk.ai_horde_api.fields import GenerationID +from horde_sdk.consts import KNOWN_NSFW_DETECTOR +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_FORMS, KNOWN_UPSCALERS +from horde_sdk.generation_parameters.alchemy.object_models import ( + AlchemyParameters, + 
NSFWAlchemyParameters, + UpscaleAlchemyParameters, +) +from horde_sdk.generation_parameters.image.consts import ( + KNOWN_IMAGE_SAMPLERS, + KNOWN_IMAGE_SCHEDULERS, + KNOWN_IMAGE_SOURCE_PROCESSING, +) +from horde_sdk.generation_parameters.image.object_models import ( + BasicImageGenerationParameters, + ImageGenerationParameters, +) +from horde_sdk.generation_parameters.text.object_models import BasicTextGenerationParameters, TextGenerationParameters from horde_sdk.generic_api.consts import ANON_API_KEY +from horde_sdk.worker.dispatch.ai_horde.image.convert import convert_image_job_pop_response_to_parameters +from horde_sdk.worker.generations import AlchemySingleGeneration, ImageSingleGeneration, TextSingleGeneration +from horde_sdk.worker.model_meta import ImageModelLoadResolver + +NETWORK_TEST_PATH_SUFFIXES: Final[set[str]] = { + "api_calls.py", +} + + +def pytest_addoption(parser: pytest.Parser) -> None: + """Register custom CLI options for the test suite.""" + + parser.addoption( + "--run-network-tests", + action="store_true", + default=False, + help="Run tests that require access to the live AI Horde API.", + ) + + +def _env_flag_enabled(value: str | None) -> bool: + """Return True when the provided environment flag string represents an enabled state.""" + + if value is None: + return False + + normalised = value.strip().lower() + return normalised in {"1", "true", "yes", "on"} + + +def _has_valid_api_key() -> bool: + """Return True when a non-anonymous AI Horde API key is available.""" + + dev_key = os.getenv("AI_HORDE_DEV_APIKEY") + if dev_key is None: + return False + + trimmed = dev_key.strip() + return bool(trimmed) and trimmed != ANON_API_KEY + + +def _should_run_network_tests(config: pytest.Config) -> bool: + """Return True when network-dependent tests should execute.""" + + if config.getoption("--run-network-tests"): + return _has_valid_api_key() + + if _env_flag_enabled(os.getenv("HORDE_SDK_RUN_NETWORK_TESTS")): + return _has_valid_api_key() + + return False @pytest.fixture(scope="session", autouse=True) @@ -39,13 +132,108 @@ def check_tests_ongoing_env_var() -> None: @pytest.fixture(scope="session") def ai_horde_api_key() -> str: + """Return the key being used for testing against an AI Horde API.""" dev_key = os.getenv("AI_HORDE_DEV_APIKEY", None) return dev_key if dev_key is not None else ANON_API_KEY +@pytest.fixture(scope="session") +def model_reference_manager() -> ModelReferenceManager: + return ModelReferenceManager() + + +@pytest.fixture(scope="session") +def image_model_load_resolver() -> ImageModelLoadResolver: + return ImageModelLoadResolver() + + +@functools.cache +def _get_testing_image(filename: str) -> bytes: + """Returns a test image.""" + + image_bytes = None + + # Get the directory of this file + dir_path = pathlib.Path(__file__).parent.absolute() + test_image_path = dir_path / "test_data" / "images" / filename + + with open(test_image_path, "rb") as f: + image_bytes = f.read() + + assert image_bytes is not None + + return image_bytes + + +@functools.cache +def _get_testing_image_base64(filename: str) -> str: + """Returns a base64 encoded test image.""" + return base64.b64encode(_get_testing_image(filename)).decode("utf-8") + + +@pytest.fixture(scope="function") +def default_testing_image_bytes() -> bytes: + """Returns a test image.""" + return _get_testing_image("haidra.png") + + +@pytest.fixture(scope="function") +def default_testing_image_PIL() -> PIL.Image.Image: + """Returns a test image.""" + return 
PIL.Image.open(io.BytesIO(_get_testing_image("haidra.png"))) + + +@pytest.fixture(scope="function") +def default_testing_image_base64() -> str: + """Returns a base64 encoded test image.""" + return _get_testing_image_base64("haidra.png") + + +@pytest.fixture(scope="function") +def img2img_testing_image_base64() -> str: + """Returns a base64 encoded test image.""" + return base64.b64encode(_get_testing_image("sketch-mountains-input.jpg")).decode("utf-8") + + +@pytest.fixture(scope="function") +def inpaint_source_image_and_mask_PIL() -> tuple[PIL.Image.Image, PIL.Image.Image]: + """Returns a test image and mask suitable for inpainting.""" + image = PIL.Image.open(io.BytesIO(_get_testing_image("test_inpaint_original.png"))) + mask = PIL.Image.open(io.BytesIO(_get_testing_image("test_inpaint_mask.png"))) + + return image, mask + + +@pytest.fixture(scope="function") +def inpainting_source_image_and_mask_base64() -> tuple[str, str]: + """Returns a base64 encoded test image and mask suitable for inpainting.""" + image = _get_testing_image_base64("test_inpaint_original.png") + mask = _get_testing_image_base64("test_inpaint_mask.png") + + return image, mask + + +@pytest.fixture(scope="function") +def outpaint_alpha_source_image_base64() -> str: + """Returns a base64 encoded test image suitable for outpainting.""" + return _get_testing_image_base64("test_outpaint.png") + + +@pytest.fixture(scope="function") +def openpose_control_map_base64() -> str: + """Returns a base64 encoded test image of a pre-processed control image.""" + return _get_testing_image_base64("test_openpose_control_map.png") + + +@pytest.fixture(scope="function") +def woman_headshot_testing_image_base64() -> str: + return base64.b64encode(_get_testing_image("woman_headshot_bokeh.png")).decode("utf-8") + + @pytest.fixture(scope="function") def simple_image_gen_request(ai_horde_api_key: str) -> ImageGenerateAsyncRequest: + """Return a simple `ImageGenerateAsyncRequest` instance with minimal arguments set.""" return ImageGenerateAsyncRequest( apikey=ai_horde_api_key, prompt="a cat in a hat", @@ -59,6 +247,7 @@ def simple_image_gen_request(ai_horde_api_key: str) -> ImageGenerateAsyncRequest @pytest.fixture(scope="function") def simple_image_gen_n_requests(ai_horde_api_key: str) -> ImageGenerateAsyncRequest: + """Return a simple `ImageGenerateAsyncRequest` instance with minimal arguments set, but with n==3.""" return ImageGenerateAsyncRequest( apikey=ai_horde_api_key, prompt="a cat in a hat", @@ -70,65 +259,826 @@ def simple_image_gen_n_requests(ai_horde_api_key: str) -> ImageGenerateAsyncRequ ) -def pytest_collection_modifyitems(items): # type: ignore # noqa - """Modifies test items to ensure test modules run in a given order.""" - MODULES_TO_RUN_FIRST = [ - "tests.tests_generic", - "tests.test_utils", - "tests.test_dynamically_check_apimodels", - "tests.test_verify_api_surface", - ] +_id_counter = 0 - MODULES_TO_RUN_LAST = [ - "tests.ai_horde_api.test_ai_horde_api_calls", - "tests.ai_horde_api.test_ai_horde_alchemy_api_calls", - "tests.ai_horde_api.test_ai_horde_generate_api_calls", - ] # FIXME make dynamic - module_mapping = {item: item.module.__name__ for item in items} - sorted_items = [] +def _single_id() -> GenerationID: + """Return a new UUID for each call.""" + global _id_counter + _id_counter += 1 - for module in MODULES_TO_RUN_FIRST: - sorted_items.extend([item for item in items if module_mapping[item] == module]) + num_to_use = _id_counter + # copy the last 8 bits to fill the rest of the UUID + for i in range(8): + 
num_to_use |= num_to_use << (8 * i) - sorted_items.extend( - [item for item in items if module_mapping[item] not in MODULES_TO_RUN_FIRST + MODULES_TO_RUN_LAST], + # mask to 128 bits (16 bytes, the size of a UUID) + num_to_use &= (1 << 128) - 1 + return GenerationID(root=UUID(int=num_to_use)) + + +@pytest.fixture(scope="function") +def single_id() -> GenerationID: + """Return a new UUID for each call.""" + return _single_id() + + +@pytest.fixture(scope="function") +def single_id_str(single_id: GenerationID) -> str: + """Return a new UUID for each call as a plain string.""" + return str(single_id) + + +@pytest.fixture(scope="function") +def id_factory() -> Callable[[], GenerationID]: + """Return a function that generates a new UUID for each call.""" + return _single_id + + +@pytest.fixture(scope="function") +def id_factory_str() -> Callable[[], str]: + """Return a function that generates a new UUID for each call as a plain string.""" + return lambda: str(_single_id()) + + +@pytest.fixture(scope="function") +def simple_text_generation_parameters( + single_id_str: str, +) -> TextGenerationParameters: + """Return a simple `TextGenerationParameters` object.""" + return TextGenerationParameters( + result_ids=[single_id_str], + base_params=BasicTextGenerationParameters( + prompt="Tell me about a cat in a hat.", + model="oFakeModel", + model_baseline="fake-baseline-v1", + ), ) - for module in MODULES_TO_RUN_LAST: - sorted_items.extend([item for item in items if module_mapping[item] == module]) - items[:] = sorted_items +@pytest.fixture(scope="function") +def simple_image_generation_parameters( + single_id_str: str, +) -> ImageGenerationParameters: + """Return a simple `ImageGenerationParameters` object.""" + return ImageGenerationParameters( + result_ids=[single_id_str], + source_processing=KNOWN_IMAGE_SOURCE_PROCESSING.txt2img, + base_params=BasicImageGenerationParameters( + model="Deliberate", + model_baseline=KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_1, + prompt="a cat in a hat", + seed="42", + width=512, + height=512, + steps=10, + cfg_scale=5, + sampler_name=KNOWN_IMAGE_SAMPLERS.k_euler_a, + scheduler=KNOWN_IMAGE_SCHEDULERS.normal, + clip_skip=1, + denoising_strength=1.0, + ), + ) -def _get_testing_image(filename: str) -> bytes: - """Returns a test image.""" - image_bytes = None +@pytest.fixture(scope="function") +def simple_image_generation_parameters_n_iter( + id_factory_str: Callable[[], str], +) -> ImageGenerationParameters: + """Return a simple `ImageGenerationParameters` object.""" + batch_size = 3 + return ImageGenerationParameters( + result_ids=[id_factory_str() for _ in range(batch_size)], + batch_size=batch_size, + source_processing=KNOWN_IMAGE_SOURCE_PROCESSING.txt2img, + base_params=BasicImageGenerationParameters( + model="Deliberate", + model_baseline=KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_1, + prompt="a cat in a hat", + seed="42", + width=512, + height=512, + steps=10, + cfg_scale=5, + sampler_name=KNOWN_IMAGE_SAMPLERS.k_euler_a, + scheduler=KNOWN_IMAGE_SCHEDULERS.normal, + clip_skip=1, + denoising_strength=1.0, + ), + ) - # Get the directory of this file - dir_path = pathlib.Path(__file__).parent.absolute() - test_image_path = dir_path / "test_data" / "images" / filename - with open(test_image_path, "rb") as f: - image_bytes = f.read() +@pytest.fixture(scope="function") +def simple_alchemy_generation_parameters( + single_id_str: str, + default_testing_image_bytes: bytes, +) -> AlchemyParameters: + """Return a simple `AlchemyParameters` object.""" + return 
AlchemyParameters( + upscalers=[ + UpscaleAlchemyParameters( + result_id=single_id_str, + form=KNOWN_ALCHEMY_FORMS.post_process, + source_image=default_testing_image_bytes, + upscaler=KNOWN_UPSCALERS.RealESRGAN_x2plus, + ), + ], + ) - assert image_bytes is not None - return image_bytes +@pytest.fixture(scope="function") +def simple_alchemy_generation_parameters_nsfw_detect( + single_id_str: str, + default_testing_image_bytes: bytes, +) -> AlchemyParameters: + """Return a simple `AlchemyParameters` object.""" + return AlchemyParameters( + nsfw_detectors=[ + NSFWAlchemyParameters( + result_id=single_id_str, + form=KNOWN_ALCHEMY_FORMS.post_process, + source_image=default_testing_image_bytes, + nsfw_detector=KNOWN_NSFW_DETECTOR.BACKEND_DEFAULT, + ), + ], + ) -@pytest.fixture(scope="session") -def default_testing_image_base64() -> str: - """Returns a base64 encoded test image.""" - return base64.b64encode(_get_testing_image("haidra.png")).decode("utf-8") +@pytest.fixture(scope="function") +def simple_image_generation_parameters_post_processing( + single_id_str: str, + simple_alchemy_generation_parameters: AlchemyParameters, +) -> ImageGenerationParameters: + """Return a simple `ImageGenerationParameters` object.""" + return ImageGenerationParameters( + result_ids=[single_id_str], + source_processing=KNOWN_IMAGE_SOURCE_PROCESSING.txt2img, + base_params=BasicImageGenerationParameters( + model="Deliberate", + model_baseline=KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_1, + prompt="a cat in a hat", + seed="42", + width=512, + height=512, + steps=10, + cfg_scale=5, + sampler_name=KNOWN_IMAGE_SAMPLERS.k_euler_a, + scheduler=KNOWN_IMAGE_SCHEDULERS.normal, + clip_skip=1, + denoising_strength=1.0, + ), + alchemy_params=simple_alchemy_generation_parameters, + ) -@pytest.fixture(scope="session") -def img2img_testing_image_base64() -> str: - """Returns a base64 encoded test image.""" - return base64.b64encode(_get_testing_image("sketch-mountains-input.jpg")).decode("utf-8") +@pytest.fixture(scope="function") +def simple_image_generation_parameters_n_iter_post_processing( + id_factory_str: Callable[[], str], + simple_alchemy_generation_parameters: AlchemyParameters, +) -> ImageGenerationParameters: + """Return a simple `ImageGenerationParameters` object.""" + batch_size = 3 + return ImageGenerationParameters( + result_ids=[id_factory_str() for _ in range(batch_size)], + batch_size=batch_size, + source_processing=KNOWN_IMAGE_SOURCE_PROCESSING.txt2img, + base_params=BasicImageGenerationParameters( + model="Deliberate", + model_baseline=KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_1, + prompt="a cat in a hat", + seed="42", + width=512, + height=512, + steps=10, + cfg_scale=5, + sampler_name=KNOWN_IMAGE_SAMPLERS.k_euler_a, + scheduler=KNOWN_IMAGE_SCHEDULERS.normal, + clip_skip=1, + denoising_strength=1.0, + ), + alchemy_params=simple_alchemy_generation_parameters, + ) -@pytest.fixture(scope="session") -def woman_headshot_testing_image_base64() -> str: - return base64.b64encode(_get_testing_image("woman_headshot_bokeh.png")).decode("utf-8") +@pytest.fixture(scope="function") +def simple_image_gen_job_pop_response( + single_id: UUID, +) -> ImageGenerateJobPopResponse: + """Return a `ImageGenerateJobPopResponse` instance with a typical model and no arguments set.""" + return ImageGenerateJobPopResponse( + ids=[single_id], + payload=ImageGenerateJobPopPayload(prompt="a cat in a hat", seed="42"), + skipped=ImageGenerateJobPopSkippedStatus(), + model="Deliberate", + 
r2_uploads=[f"https://not.a.real.url.internal/upload/{single_id}"], + ) + + +@pytest.fixture(scope="function") +def simple_image_gen_job_pop_response_n_requests( + id_factory: Callable[[], GenerationID], +) -> ImageGenerateJobPopResponse: + """Return a `ImageGenerateJobPopResponse` instance with no arguments set.""" + ids = [id_factory() for _ in range(3)] + return ImageGenerateJobPopResponse( + ids=ids, + payload=ImageGenerateJobPopPayload(prompt="a cat in a hat", seed="42"), + skipped=ImageGenerateJobPopSkippedStatus(), + model="Deliberate", + r2_uploads=[f"https://not.a.real.url.internal/upload/{id_}" for id_ in ids], + ) + + +@pytest.fixture(scope="function") +def simple_image_gen_job_pop_response_post_processing( + single_id: UUID, +) -> ImageGenerateJobPopResponse: + """Return a `ImageGenerateJobPopResponse` instance with an upscaling post-processing argument set.""" + from horde_sdk.generation_parameters.alchemy.consts import KNOWN_UPSCALERS + + return ImageGenerateJobPopResponse( + ids=[single_id], + payload=ImageGenerateJobPopPayload( + prompt="a cat in a hat", + seed="42", + post_processing=[KNOWN_UPSCALERS.RealESRGAN_x2plus], + ), + skipped=ImageGenerateJobPopSkippedStatus(), + model="Deliberate", + r2_uploads=[f"https://not.a.real.url.internal/upload/{single_id}"], + ) + + +@pytest.fixture(scope="function") +def simple_image_gen_job_pop_response_img2img( + single_id: UUID, + img2img_testing_image_base64: str, +) -> ImageGenerateJobPopResponse: + """Return a `ImageGenerateJobPopResponse` instance for `img2img`""" + from horde_sdk.generation_parameters.image.consts import KNOWN_IMAGE_SOURCE_PROCESSING + + return ImageGenerateJobPopResponse( + ids=[single_id], + payload=ImageGenerateJobPopPayload( + prompt="a cat in a hat", + seed="42", + ), + skipped=ImageGenerateJobPopSkippedStatus(), + model="Deliberate", + source_image=img2img_testing_image_base64, + source_processing=KNOWN_IMAGE_SOURCE_PROCESSING.img2img, + r2_uploads=[f"https://not.a.real.url.internal/upload/{single_id}"], + ) + + +@pytest.fixture(scope="function") +def simple_image_gen_job_pop_response_img2img_masked( + single_id: UUID, + inpainting_source_image_and_mask_base64: tuple[str, str], +) -> ImageGenerateJobPopResponse: + """Return a `ImageGenerateJobPopResponse` instance for `img2img`""" + from horde_sdk.generation_parameters.image.consts import KNOWN_IMAGE_SOURCE_PROCESSING + + source_image, source_mask = inpainting_source_image_and_mask_base64 + + return ImageGenerateJobPopResponse( + ids=[single_id], + payload=ImageGenerateJobPopPayload( + prompt="a cat in a hat", + seed="42", + ), + skipped=ImageGenerateJobPopSkippedStatus(), + model="Deliberate", + source_image=source_image, + source_mask=source_mask, + source_processing=KNOWN_IMAGE_SOURCE_PROCESSING.img2img, + r2_uploads=[f"https://not.a.real.url.internal/upload/{single_id}"], + ) + + +@pytest.fixture(scope="function") +def simple_image_gen_job_pop_response_inpainting( + single_id: UUID, + inpainting_source_image_and_mask_base64: tuple[str, str], +) -> ImageGenerateJobPopResponse: + """Return a `ImageGenerateJobPopResponse` instance for `img2img`""" + from horde_sdk.generation_parameters.image.consts import KNOWN_IMAGE_SOURCE_PROCESSING + + source_image, source_mask = inpainting_source_image_and_mask_base64 + + return ImageGenerateJobPopResponse( + ids=[single_id], + payload=ImageGenerateJobPopPayload( + prompt="a cat in a hat", + seed="42", + ), + skipped=ImageGenerateJobPopSkippedStatus(), + model="Deliberate", + source_image=source_image, + 
source_mask=source_mask, + source_processing=KNOWN_IMAGE_SOURCE_PROCESSING.inpainting, + r2_uploads=[f"https://not.a.real.url.internal/upload/{single_id}"], + ) + + +@pytest.fixture(scope="function") +def simple_image_gen_job_pop_response_outpainting_alpha( + single_id: UUID, + outpaint_alpha_source_image_base64: str, +) -> ImageGenerateJobPopResponse: + """Return a `ImageGenerateJobPopResponse` instance for `img2img`""" + from horde_sdk.generation_parameters.image.consts import KNOWN_IMAGE_SOURCE_PROCESSING + + return ImageGenerateJobPopResponse( + ids=[single_id], + payload=ImageGenerateJobPopPayload( + prompt="a cat in a hat", + seed="42", + ), + skipped=ImageGenerateJobPopSkippedStatus(), + model="Deliberate", + source_image=outpaint_alpha_source_image_base64, + source_processing=KNOWN_IMAGE_SOURCE_PROCESSING.inpainting, + r2_uploads=[f"https://not.a.real.url.internal/upload/{single_id}"], + ) + + +@pytest.fixture(scope="function") +def simple_image_gen_job_pop_response_controlnet_openpose( + single_id: UUID, + openpose_control_map_base64: str, +) -> ImageGenerateJobPopResponse: + """Return a `ImageGenerateJobPopResponse` instance with `controlnet` set""" + from horde_sdk.generation_parameters.image.consts import KNOWN_IMAGE_CONTROLNETS + + return ImageGenerateJobPopResponse( + ids=[single_id], + payload=ImageGenerateJobPopPayload( + prompt="a cat in a hat", + image_is_control=True, + control_type=KNOWN_IMAGE_CONTROLNETS.openpose, + seed="42", + ), + skipped=ImageGenerateJobPopSkippedStatus(), + model="Deliberate", + source_image=openpose_control_map_base64, + r2_uploads=[f"https://not.a.real.url.internal/upload/{single_id}"], + ) + + +@pytest.fixture(scope="function") +def simple_image_gen_job_pop_response_hires_fix( + single_id: UUID, +) -> ImageGenerateJobPopResponse: + """Return a `ImageGenerateJobPopResponse` instance for `hires_fix`""" + return ImageGenerateJobPopResponse( + ids=[single_id], + payload=ImageGenerateJobPopPayload( + prompt="a cat in a hat", + seed="42", + hires_fix=True, + ), + skipped=ImageGenerateJobPopSkippedStatus(), + model="Deliberate", + r2_uploads=[f"https://not.a.real.url.internal/upload/{single_id}"], + ) + + +@pytest.fixture(scope="function") +def simple_image_generation_parameters_hires_fix( + simple_image_gen_job_pop_response_hires_fix: ImageGenerateJobPopResponse, + model_reference_manager: ModelReferenceManager, +) -> ImageGenerationParameters: + """Return a simple `ImageGenerationParameters` object.""" + parameters, _ = convert_image_job_pop_response_to_parameters( + simple_image_gen_job_pop_response_hires_fix, + model_reference_manager, + ) + return parameters + + +@pytest.fixture(scope="function") +def simple_image_gen_job_pop_response_hires_fix_denoise( + single_id: UUID, +) -> ImageGenerateJobPopResponse: + """Return a `ImageGenerateJobPopResponse` instance for `hires_fix`""" + return ImageGenerateJobPopResponse( + ids=[single_id], + payload=ImageGenerateJobPopPayload( + prompt="a cat in a hat", + seed="42", + hires_fix=True, + hires_fix_denoising_strength=0.8, + ), + skipped=ImageGenerateJobPopSkippedStatus(), + model="Deliberate", + r2_uploads=[f"https://not.a.real.url.internal/upload/{single_id}"], + ) + + +@pytest.fixture(scope="function") +def simple_image_gen_job_pop_response_loras( + single_id: UUID, +) -> ImageGenerateJobPopResponse: + """Return a `ImageGenerateJobPopResponse` instance with `loras`""" + from horde_sdk.ai_horde_api.apimodels import LorasPayloadEntry + + return ImageGenerateJobPopResponse( + ids=[single_id], + 
payload=ImageGenerateJobPopPayload( + prompt="a cat in a hat", + seed="42", + loras=[LorasPayloadEntry(name="76693", model=1, clip=1)], + ), + skipped=ImageGenerateJobPopSkippedStatus(), + model="Deliberate", + r2_uploads=[f"https://not.a.real.url.internal/upload/{single_id}"], + ) + + +@pytest.fixture(scope="function") +def simple_image_gen_job_pop_response_tis( + single_id: UUID, +) -> ImageGenerateJobPopResponse: + """Return a `ImageGenerateJobPopResponse` instance with `tis`""" + from horde_sdk.ai_horde_api.apimodels import TIPayloadEntry + + return ImageGenerateJobPopResponse( + ids=[single_id], + payload=ImageGenerateJobPopPayload( + prompt="a cat in a hat", + seed="42", + tis=[ + TIPayloadEntry( + name="72437", + inject_ti="negprompt", + strength=1, + ), + ], + ), + skipped=ImageGenerateJobPopSkippedStatus(), + model="Deliberate", + r2_uploads=[f"https://not.a.real.url.internal/upload/{single_id}"], + ) + + +@pytest.fixture(scope="function") +def simple_image_gen_job_pop_response_remix( + single_id: UUID, + default_testing_image_base64: str, + woman_headshot_testing_image_base64: str, +) -> ImageGenerateJobPopResponse: + """Return a `ImageGenerateJobPopResponse` instance with `remix`""" + from horde_sdk.generation_parameters.image.consts import KNOWN_IMAGE_SOURCE_PROCESSING + + return ImageGenerateJobPopResponse( + ids=[single_id], + payload=ImageGenerateJobPopPayload( + prompt="a headshot of a woman with a stylized logo on her face", + width=1024, + height=1024, + seed="42", + ), + skipped=ImageGenerateJobPopSkippedStatus(), + source_processing=KNOWN_IMAGE_SOURCE_PROCESSING.remix, + model="Stable Cascade 1.0", + r2_uploads=[f"https://not.a.real.url.internal/upload/{single_id}"], + source_image=woman_headshot_testing_image_base64, + extra_source_images=[ExtraSourceImageEntry(image=default_testing_image_base64, strength=0.75)], + ) + + +@pytest.fixture(scope="function") +def simple_text_gen_job_pop_response( + single_id: UUID, +) -> TextGenerateJobPopResponse: + """Return a `TextGenerateJobPopResponse` instance with a dummy model no other arguments set.""" + return TextGenerateJobPopResponse( + ids=[single_id], + payload=ModelPayloadKobold(prompt="Tell me about a cat in a hat."), + skipped=NoValidRequestFoundKobold(), + model="oFakeModel", + ) + + +@pytest.fixture(scope="function") +def simple_alchemy_gen_job_pop_response_interrogate( + single_id: UUID, + default_testing_image_base64: str, +) -> AlchemyJobPopResponse: + """Return a `AlchemyJobPopResponse` instance for `interrogation` and no other arguments set""" + from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES + + return AlchemyJobPopResponse( + forms=[ + AlchemyPopFormPayload( + id=single_id, + form=KNOWN_ALCHEMY_TYPES.interrogation, + r2_upload=f"https://not.a.real.url.internal/upload/{single_id}", + source_image=default_testing_image_base64, + ), + ], + skipped=NoValidAlchemyFound(), + ) + + +@pytest.fixture(scope="function") +def simple_alchemy_gen_job_pop_response_nsfw_detect( + single_id: UUID, + default_testing_image_base64: str, +) -> AlchemyJobPopResponse: + """Return a `AlchemyJobPopResponse` instance for `nsfw` detect and no other arguments set""" + from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES + + return AlchemyJobPopResponse( + forms=[ + AlchemyPopFormPayload( + id=single_id, + form=KNOWN_ALCHEMY_TYPES.nsfw, + r2_upload=f"https://not.a.real.url.internal/upload/{single_id}", + source_image=default_testing_image_base64, + ), + ], + 
skipped=NoValidAlchemyFound(), + ) + + +@pytest.fixture(scope="function") +def simple_alchemy_gen_job_pop_response_all_feature_extractions( + id_factory_str: Callable[[], str], + default_testing_image_base64: str, +) -> AlchemyJobPopResponse: + """Return a `AlchemyJobPopResponse` instance for all feature extractions and no other arguments set""" + from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES + + ids = [id_factory_str() for _ in range(3)] + return AlchemyJobPopResponse( + forms=[ + AlchemyPopFormPayload( + id=ids[0], + form=KNOWN_ALCHEMY_TYPES.interrogation, + r2_upload=f"https://not.a.real.url.internal/upload/{ids[0]}", + source_image=default_testing_image_base64, + ), + AlchemyPopFormPayload( + id=ids[1], + form=KNOWN_ALCHEMY_TYPES.nsfw, + r2_upload=f"https://not.a.real.url.internal/upload/{ids[1]}", + source_image=default_testing_image_base64, + ), + AlchemyPopFormPayload( + id=ids[2], + form=KNOWN_ALCHEMY_TYPES.caption, + r2_upload=f"https://not.a.real.url.internal/upload/{ids[2]}", + source_image=default_testing_image_base64, + ), + ], + skipped=NoValidAlchemyFound(), + ) + + +@pytest.fixture(scope="function") +def simple_alchemy_gen_job_pop_response_upscale( + single_id: UUID, + default_testing_image_base64: str, +) -> AlchemyJobPopResponse: + """Return a `AlchemyJobPopResponse` instance for `upscaling` and no other arguments set""" + from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES + + return AlchemyJobPopResponse( + forms=[ + AlchemyPopFormPayload( + id=single_id, + form=KNOWN_ALCHEMY_TYPES.RealESRGAN_x2plus, + r2_upload=f"https://not.a.real.url.internal/upload/{single_id}", + source_image=default_testing_image_base64, + ), + ], + skipped=NoValidAlchemyFound(), + ) + + +@pytest.fixture(scope="function") +def simple_alchemy_gen_job_pop_response_upscale_multiple( + id_factory_str: Callable[[], str], + default_testing_image_base64: str, +) -> AlchemyJobPopResponse: + """Return a `AlchemyJobPopResponse` instance for multiple `upscaling` operations and no other arguments set""" + from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES + + ids = [id_factory_str() for _ in range(2)] + return AlchemyJobPopResponse( + forms=[ + AlchemyPopFormPayload( + id=ids[0], + form=KNOWN_ALCHEMY_TYPES.RealESRGAN_x2plus, + r2_upload=f"https://not.a.real.url.internal/upload/{ids[0]}", + source_image=default_testing_image_base64, + ), + AlchemyPopFormPayload( + id=ids[1], + form=KNOWN_ALCHEMY_TYPES.RealESRGAN_x4plus, + r2_upload=f"https://not.a.real.url.internal/upload/{ids[1]}", + source_image=default_testing_image_base64, + ), + ], + skipped=NoValidAlchemyFound(), + ) + + +@pytest.fixture(scope="function") +def simple_alchemy_gen_job_pop_response_facefix( + single_id: UUID, + default_testing_image_base64: str, +) -> AlchemyJobPopResponse: + """Return a `AlchemyJobPopResponse` instance for `facefix` and no other arguments set""" + from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES + + return AlchemyJobPopResponse( + forms=[ + AlchemyPopFormPayload( + id=single_id, + form=KNOWN_ALCHEMY_TYPES.CodeFormers, + r2_upload=f"https://not.a.real.url.internal/upload/{single_id}", + source_image=default_testing_image_base64, + ), + ], + skipped=NoValidAlchemyFound(), + ) + + +@pytest.fixture(scope="function") +def simple_alchemy_gen_job_pop_response_facefix_multiple( + id_factory_str: Callable[[], str], + default_testing_image_base64: str, +) -> AlchemyJobPopResponse: + """Return a 
`AlchemyJobPopResponse` instance for multiple `facefix` operations and no other arguments set""" + from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES + + ids = [id_factory_str() for _ in range(2)] + return AlchemyJobPopResponse( + forms=[ + AlchemyPopFormPayload( + id=ids[0], + form=KNOWN_ALCHEMY_TYPES.CodeFormers, + r2_upload=f"https://not.a.real.url.internal/upload/{ids[0]}", + source_image=default_testing_image_base64, + ), + AlchemyPopFormPayload( + id=ids[1], + form=KNOWN_ALCHEMY_TYPES.GFPGAN, + r2_upload=f"https://not.a.real.url.internal/upload/{ids[1]}", + source_image=default_testing_image_base64, + ), + ], + skipped=NoValidAlchemyFound(), + ) + + +@pytest.fixture(scope="function") +def simple_alchemy_gen_job_pop_response_strip_background( + single_id: UUID, + default_testing_image_base64: str, +) -> AlchemyJobPopResponse: + """Return a `AlchemyJobPopResponse` instance for `strip_background` and no other arguments set""" + from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES + + return AlchemyJobPopResponse( + forms=[ + AlchemyPopFormPayload( + id=single_id, + form=KNOWN_ALCHEMY_TYPES.strip_background, + r2_upload=f"https://not.a.real.url.internal/upload/{single_id}", + source_image=default_testing_image_base64, + ), + ], + skipped=NoValidAlchemyFound(), + ) + + +@pytest.fixture(scope="function") +def simple_alchemy_gen_job_pop_response_all( + id_factory_str: Callable[[], str], + default_testing_image_base64: str, +) -> AlchemyJobPopResponse: + """Return a `AlchemyJobPopResponse` instance for all operations and no other arguments set""" + from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES + + ids = [id_factory_str() for _ in range(5)] + return AlchemyJobPopResponse( + forms=[ + AlchemyPopFormPayload( + id=ids[0], + form=KNOWN_ALCHEMY_TYPES.interrogation, + r2_upload=f"https://not.a.real.url.internal/upload/{ids[0]}", + source_image=default_testing_image_base64, + ), + AlchemyPopFormPayload( + id=ids[1], + form=KNOWN_ALCHEMY_TYPES.nsfw, + r2_upload=f"https://not.a.real.url.internal/upload/{ids[1]}", + source_image=default_testing_image_base64, + ), + AlchemyPopFormPayload( + id=ids[2], + form=KNOWN_ALCHEMY_TYPES.strip_background, + r2_upload=f"https://not.a.real.url.internal/upload/{ids[2]}", + source_image=default_testing_image_base64, + ), + AlchemyPopFormPayload( + id=ids[3], + form=KNOWN_ALCHEMY_TYPES.RealESRGAN_x2plus, + r2_upload=f"https://not.a.real.url.internal/upload/{ids[3]}", + source_image=default_testing_image_base64, + ), + AlchemyPopFormPayload( + id=ids[4], + form=KNOWN_ALCHEMY_TYPES.CodeFormers, + r2_upload=f"https://not.a.real.url.internal/upload/{ids[4]}", + source_image=default_testing_image_base64, + ), + ], + skipped=NoValidAlchemyFound(), + ) + + +@pytest.fixture(scope="function") +def simple_image_generation( + simple_image_generation_parameters: ImageGenerationParameters, +) -> ImageSingleGeneration: + """Return a simple `ImageSingleGeneration` object.""" + + return ImageSingleGeneration( + generation_parameters=simple_image_generation_parameters, + ) + + +@pytest.fixture(scope="function") +def simple_text_generation( + simple_text_generation_parameters: TextGenerationParameters, +) -> TextSingleGeneration: + """Return a simple `TextSingleGeneration` object.""" + + return TextSingleGeneration( + generation_parameters=simple_text_generation_parameters, + ) + + +@pytest.fixture(scope="function") +def simple_alchemy_generation( + simple_alchemy_generation_parameters: 
AlchemyParameters, +) -> AlchemySingleGeneration: + """Return a simple `AlchemySingleGeneration` object.""" + assert len(simple_alchemy_generation_parameters.all_alchemy_operations) == 1 + alchemy_single_generation_parameters = simple_alchemy_generation_parameters.all_alchemy_operations[0] + + return AlchemySingleGeneration( + generation_parameters=alchemy_single_generation_parameters, + ) + + +def pytest_collection_modifyitems(session: pytest.Session, config: pytest.Config, items: list[pytest.Item]) -> None: + """Modifies test items to ensure test modules run in a given order and skips network tests when disabled.""" + MODULES_TO_RUN_FIRST = [ + "tests/tests_generic.py", + "tests/test_utils.py", + "tests/test_dynamically_check_apimodels.py", + "tests/test_verify_api_surface.py", + ] + + MODULES_TO_RUN_LAST: list[str] = [] + + run_network_tests = _should_run_network_tests(config) + if not run_network_tests: + skip_network = pytest.mark.skip( + reason=("network-dependent tests require --run-network-tests and a valid AI_HORDE_DEV_APIKEY"), + ) + for item in items: + item_path = item.path.as_posix() + if any(item_path.endswith(suffix) for suffix in NETWORK_TEST_PATH_SUFFIXES): + item.add_marker(skip_network) + + module_mapping = {item: item.path.as_posix() for item in items} + + def _matches(path: str, target: str) -> bool: + return path.endswith(target) + + sorted_items: list[pytest.Item] = [] + + for module in MODULES_TO_RUN_FIRST: + sorted_items.extend([item for item in items if _matches(module_mapping[item], module)]) + + sorted_items.extend( + [ + item + for item in items + if not any( + _matches(module_mapping[item], candidate) for candidate in MODULES_TO_RUN_FIRST + MODULES_TO_RUN_LAST + ) + ], + ) + + for module in MODULES_TO_RUN_LAST: + sorted_items.extend([item for item in items if _matches(module_mapping[item], module)]) + + items[:] = sorted_items diff --git a/tests/deploy_config/__init__.py b/tests/deploy_config/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/deploy_config/test_feature_flags.py b/tests/deploy_config/test_feature_flags.py new file mode 100644 index 00000000..b108a01f --- /dev/null +++ b/tests/deploy_config/test_feature_flags.py @@ -0,0 +1,156 @@ +import pytest +from horde_model_reference.meta_consts import KNOWN_IMAGE_GENERATION_BASELINE + +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_TYPES +from horde_sdk.generation_parameters.generic.consts import KNOWN_AUX_MODEL_SOURCE +from horde_sdk.generation_parameters.image.consts import ( + CLIP_SKIP_REPRESENTATION, + KNOWN_IMAGE_CONTROLNETS, + KNOWN_IMAGE_SAMPLERS, + KNOWN_IMAGE_SCHEDULERS, + KNOWN_IMAGE_SOURCE_PROCESSING, + KNOWN_IMAGE_WORKFLOWS, +) +from horde_sdk.generation_parameters.image.object_models import ( + ControlnetFeatureFlags, + ImageGenerationFeatureFlags, + ImageGenerationParameters, + image_parameters_to_feature_flags, +) +from horde_sdk.worker.feature_flags import ( + IMAGE_WORKER_NOT_CAPABLE_REASON, + RESULT_RETURN_METHOD, + ImageWorkerFeatureFlags, +) + + +@pytest.fixture(scope="function") +def reference_image_worker_feature_flags() -> ImageWorkerFeatureFlags: + """Fixture for a 'reference' worker capable of all features.""" + return ImageWorkerFeatureFlags( + supported_result_return_methods=list(RESULT_RETURN_METHOD.__members__.values()), + supports_threads=True, + image_generation_feature_flags=ImageGenerationFeatureFlags( + baselines=list(KNOWN_IMAGE_GENERATION_BASELINE.__members__.values()), + 
clip_skip_representation=CLIP_SKIP_REPRESENTATION.NEGATIVE_OFFSET, + hires_fix=True, + schedulers=list(KNOWN_IMAGE_SCHEDULERS.__members__.values()), + samplers=list(KNOWN_IMAGE_SAMPLERS.__members__.values()), + tiling=True, + controlnets_feature_flags=ControlnetFeatureFlags( + controlnets=list(KNOWN_IMAGE_CONTROLNETS.__members__.values()), + image_is_control=True, + return_control_map=True, + ), + post_processing=list(KNOWN_ALCHEMY_TYPES.__members__.values()), + source_processing=list(KNOWN_IMAGE_SOURCE_PROCESSING.__members__.values()), + workflows=list(KNOWN_IMAGE_WORKFLOWS.__members__.values()), + tis=list(KNOWN_AUX_MODEL_SOURCE.__members__.values()), + loras=list(KNOWN_AUX_MODEL_SOURCE.__members__.values()), + ), + ) + + +@pytest.fixture(scope="function") +def minimal_image_worker_feature_flags() -> ImageWorkerFeatureFlags: + """Fixture for a 'minimal' worker capable of only the most basic features.""" + return ImageWorkerFeatureFlags( + supported_result_return_methods=[RESULT_RETURN_METHOD.base64_post_back], + supports_threads=False, + image_generation_feature_flags=ImageGenerationFeatureFlags( + baselines=[KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_1], + clip_skip_representation=CLIP_SKIP_REPRESENTATION.NEGATIVE_OFFSET, + hires_fix=False, + schedulers=[KNOWN_IMAGE_SCHEDULERS.simple], + samplers=[KNOWN_IMAGE_SAMPLERS.k_lms], + tiling=False, + controlnets_feature_flags=None, + post_processing=None, + source_processing=[KNOWN_IMAGE_SOURCE_PROCESSING.txt2img], + workflows=None, + tis=None, + loras=None, + ), + ) + + +def test_image_parameters_to_feature_flags( + simple_image_generation_parameters: ImageGenerationParameters, +) -> None: + feature_flags = image_parameters_to_feature_flags(simple_image_generation_parameters) + assert isinstance(feature_flags, ImageGenerationFeatureFlags) + + assert len(feature_flags.baselines) == 1 + assert feature_flags.baselines + assert KNOWN_IMAGE_GENERATION_BASELINE.stable_diffusion_1 in feature_flags.baselines + + assert feature_flags.workflows is None + assert feature_flags.loras is None + assert feature_flags.tis is None + + assert len(feature_flags.samplers) == 1 + assert len(feature_flags.schedulers) == 1 + assert not feature_flags.extra_source_images + assert not feature_flags.extra_texts + assert not feature_flags.tiling + assert not feature_flags.hires_fix + + assert simple_image_generation_parameters.base_params.model_baseline in feature_flags.baselines + assert simple_image_generation_parameters.base_params.sampler_name in feature_flags.samplers + assert simple_image_generation_parameters.base_params.scheduler in feature_flags.schedulers + + +def test_is_image_worker_capable( + reference_image_worker_feature_flags: ImageWorkerFeatureFlags, + simple_image_generation_parameters: ImageGenerationParameters, +) -> None: + assert reference_image_worker_feature_flags.is_capable_of_features( + image_parameters_to_feature_flags(simple_image_generation_parameters), + ) + + +def test_minimal_worker_not_capable( + minimal_image_worker_feature_flags: ImageWorkerFeatureFlags, + simple_image_generation_parameters: ImageGenerationParameters, +) -> None: + assert not minimal_image_worker_feature_flags.is_capable_of_features( + image_parameters_to_feature_flags(simple_image_generation_parameters), + ) + + +def test_reasons_minimal_worker_not_capable( + minimal_image_worker_feature_flags: ImageWorkerFeatureFlags, + simple_image_generation_parameters: ImageGenerationParameters, + simple_image_generation_parameters_hires_fix: ImageGenerationParameters, +) -> 
None: + reasons_simple = minimal_image_worker_feature_flags.reasons_not_capable_of_features( + image_parameters_to_feature_flags(simple_image_generation_parameters), + ) + assert reasons_simple is not None + assert len(reasons_simple) == 2 + assert IMAGE_WORKER_NOT_CAPABLE_REASON.schedulers in reasons_simple + assert IMAGE_WORKER_NOT_CAPABLE_REASON.samplers in reasons_simple + + tiling_parameters = simple_image_generation_parameters.model_copy( + update={ + "base_params": simple_image_generation_parameters.base_params.model_copy(update={"tiling": True}), + }, + ) + reasons_tiling = minimal_image_worker_feature_flags.reasons_not_capable_of_features( + image_parameters_to_feature_flags(tiling_parameters), + ) + assert reasons_tiling is not None + assert len(reasons_tiling) == 3 + assert IMAGE_WORKER_NOT_CAPABLE_REASON.schedulers in reasons_tiling + assert IMAGE_WORKER_NOT_CAPABLE_REASON.samplers in reasons_tiling + assert IMAGE_WORKER_NOT_CAPABLE_REASON.tiling in reasons_tiling + + reasons_hires = minimal_image_worker_feature_flags.reasons_not_capable_of_features( + image_parameters_to_feature_flags(simple_image_generation_parameters_hires_fix), + ) + + assert reasons_hires is not None + assert len(reasons_hires) == 3 + assert IMAGE_WORKER_NOT_CAPABLE_REASON.schedulers in reasons_hires + assert IMAGE_WORKER_NOT_CAPABLE_REASON.samplers in reasons_hires + assert IMAGE_WORKER_NOT_CAPABLE_REASON.hires_fix in reasons_hires diff --git a/tests/generation_parameters/test_alchemy_dispatch.py b/tests/generation_parameters/test_alchemy_dispatch.py new file mode 100644 index 00000000..e7c80656 --- /dev/null +++ b/tests/generation_parameters/test_alchemy_dispatch.py @@ -0,0 +1,120 @@ +from __future__ import annotations + +import pytest + +from horde_sdk.generation_parameters.alchemy.consts import ( + KNOWN_ALCHEMY_FORMS, + KNOWN_CAPTION_MODELS, + KNOWN_FACEFIXERS, + KNOWN_INTERROGATORS, + KNOWN_UPSCALERS, +) +from horde_sdk.generation_parameters.alchemy.object_models import ( + CaptionAlchemyParameters, + CaptionAlchemyParametersTemplate, + FacefixAlchemyParameters, + FacefixAlchemyParametersTemplate, + InterrogateAlchemyParameters, + InterrogateAlchemyParametersTemplate, + NSFWAlchemyParameters, + NSFWAlchemyParametersTemplate, + ResolverRule, + SingleAlchemyParameters, + SingleAlchemyParametersTemplate, + UpscaleAlchemyParameters, + UpscaleAlchemyParametersTemplate, + instantiate_alchemy_parameters, + register_alchemy_parameter_rule, + unregister_alchemy_parameter_rule, +) + + +@pytest.mark.parametrize( + "template, expected_type", + [ + ( + UpscaleAlchemyParametersTemplate( + form=KNOWN_ALCHEMY_FORMS.post_process, + upscaler=KNOWN_UPSCALERS.RealESRGAN_x4plus, + ), + UpscaleAlchemyParameters, + ), + ( + FacefixAlchemyParametersTemplate( + form=KNOWN_ALCHEMY_FORMS.post_process, + facefixer=KNOWN_FACEFIXERS.GFPGAN, + ), + FacefixAlchemyParameters, + ), + ( + InterrogateAlchemyParametersTemplate( + form=KNOWN_ALCHEMY_FORMS.interrogation, + interrogator=KNOWN_INTERROGATORS.BACKEND_DEFAULT, + ), + InterrogateAlchemyParameters, + ), + ( + CaptionAlchemyParametersTemplate( + form=KNOWN_ALCHEMY_FORMS.caption, + caption_model=KNOWN_CAPTION_MODELS.BLIP_BASE_SALESFORCE, + ), + CaptionAlchemyParameters, + ), + ( + NSFWAlchemyParametersTemplate( + form=KNOWN_ALCHEMY_FORMS.nsfw, + nsfw_detector="demo", + ), + NSFWAlchemyParameters, + ), + ], +) +def test_template_dispatches_to_specialised_model( + template: SingleAlchemyParametersTemplate, + expected_type: type[SingleAlchemyParameters], +) -> None: + parameters = 
template.to_parameters(result_id="example-id") + assert isinstance(parameters, expected_type) + + +def test_template_without_specialised_fields_uses_base_model() -> None: + template = SingleAlchemyParametersTemplate(form="custom-form") + parameters = template.to_parameters(result_id="example-id") + assert isinstance(parameters, SingleAlchemyParameters) + + +class CustomAlchemyParameters(SingleAlchemyParameters): + custom_value: str + + +class CustomAlchemyParametersTemplate(SingleAlchemyParametersTemplate): + custom_value: str | None = None + + +def test_custom_resolver_can_be_registered() -> None: + template = CustomAlchemyParametersTemplate(custom_value="demo", form="custom") + + rule = ResolverRule( + predicate=lambda payload: payload.get("custom_value") == "demo", + model=CustomAlchemyParameters, + ) + + register_alchemy_parameter_rule(rule) + try: + parameters = template.to_parameters(result_id="example-id") + assert isinstance(parameters, CustomAlchemyParameters) + finally: + unregister_alchemy_parameter_rule(rule) + + +def test_instantiate_accepts_template_payload() -> None: + template = UpscaleAlchemyParametersTemplate( + form=KNOWN_ALCHEMY_FORMS.post_process, + upscaler=KNOWN_UPSCALERS.RealESRGAN_x4plus, + result_id="template-id", + ) + + parameters = instantiate_alchemy_parameters(template) + + assert isinstance(parameters, UpscaleAlchemyParameters) + assert parameters.result_id == "template-id" diff --git a/tests/generation_parameters/test_image_templates.py b/tests/generation_parameters/test_image_templates.py new file mode 100644 index 00000000..0353d68e --- /dev/null +++ b/tests/generation_parameters/test_image_templates.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +from horde_sdk.generation_parameters.image.object_models import ( + BasicImageGenerationParameters, + BasicImageGenerationParametersTemplate, + ImageGenerationParameters, + ImageGenerationParametersTemplate, +) + + +def _create_basic_template(prompt: str) -> BasicImageGenerationParametersTemplate: + return BasicImageGenerationParametersTemplate( + prompt=prompt, + model="example-model", + ) + + +def test_image_template_to_parameters_applies_base_updates() -> None: + template = ImageGenerationParametersTemplate( + base_params=_create_basic_template(prompt="starting"), + ) + + parameters = template.to_parameters(base_param_updates=BasicImageGenerationParametersTemplate(prompt="updated")) + + assert isinstance(parameters.base_params, BasicImageGenerationParameters) + assert parameters.base_params.prompt == "updated" + + +def test_image_template_to_parameters_reuses_existing_result_ids() -> None: + parameters = ImageGenerationParameters( + base_params=BasicImageGenerationParameters( + prompt="kept", + model="example-model", + ), + result_ids=["existing-id"], + ) + + converted = parameters.to_parameters() + + assert converted.result_ids == ["existing-id"] + + +def test_image_parameters_accept_existing_instance_via_model_validate() -> None: + template = ImageGenerationParametersTemplate( + base_params=_create_basic_template(prompt="validate"), + ) + + parameters = template.to_parameters() + clone = ImageGenerationParameters.model_validate(parameters, from_attributes=True) + + assert clone.model_dump() == parameters.model_dump() diff --git a/tests/generation_parameters/test_template_finalization_enforcement.py b/tests/generation_parameters/test_template_finalization_enforcement.py new file mode 100644 index 00000000..a64f221d --- /dev/null +++ 
b/tests/generation_parameters/test_template_finalization_enforcement.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +from collections.abc import Callable +from dataclasses import dataclass +from importlib import import_module +from types import ModuleType +from typing import Protocol + +import pytest + + +class TemplateConversionProbe(Protocol): + def to_parameters(self, **kwargs: object) -> object: # pragma: no cover - Protocol definition + ... + + +@dataclass(frozen=True, slots=True) +class FinalizationEnforcementCase: + module_path: str + case_id: str + template_factory: Callable[[ModuleType], TemplateConversionProbe] + call_kwargs_factory: Callable[[ModuleType], dict[str, object]] + + +CASES: tuple[FinalizationEnforcementCase, ...] = ( + FinalizationEnforcementCase( + module_path="horde_sdk.generation_parameters.image.object_models", + case_id="image", + template_factory=lambda module: module.ImageGenerationParametersTemplate( + base_params=module.BasicImageGenerationParametersTemplate( + model="example-image-model", + prompt="prompt", + ), + ), + call_kwargs_factory=lambda module: {"result_ids": ["img-1"]}, + ), + FinalizationEnforcementCase( + module_path="horde_sdk.generation_parameters.text.object_models", + case_id="text", + template_factory=lambda module: module.TextGenerationParametersTemplate( + base_params=module.BasicTextGenerationParametersTemplate( + model="example-text-model", + prompt="prompt", + ), + ), + call_kwargs_factory=lambda module: {"result_ids": ["txt-1"]}, + ), + FinalizationEnforcementCase( + module_path="horde_sdk.generation_parameters.alchemy.object_models", + case_id="alchemy", + template_factory=lambda module: module.UpscaleAlchemyParametersTemplate( + form=module.KNOWN_ALCHEMY_FORMS.post_process, + upscaler=module.KNOWN_UPSCALERS.RealESRGAN_x4plus, + ), + call_kwargs_factory=lambda module: { + "result_id": "alchemy-1", + "source_image": b"binary", + }, + ), +) + + +@pytest.mark.parametrize("case", CASES, ids=[case.case_id for case in CASES]) +def test_templates_call_finalize_template_for_parameters( + case: FinalizationEnforcementCase, + monkeypatch: pytest.MonkeyPatch, +) -> None: + module = import_module(case.module_path) + original = module.finalize_template_for_parameters + finalize_called = False + + def _tracking_finalize(*args: object, **kwargs: object) -> object: + nonlocal finalize_called + finalize_called = True + return original(*args, **kwargs) + + monkeypatch.setattr(module, "finalize_template_for_parameters", _tracking_finalize) + + template = case.template_factory(module) + call_kwargs = case.call_kwargs_factory(module) + template.to_parameters(**call_kwargs) + + assert finalize_called, ( + "Template.conversion must call finalize_template_for_parameters() to ensure deterministic result IDs." 
+ ) diff --git a/tests/generation_parameters/test_text_templates.py b/tests/generation_parameters/test_text_templates.py new file mode 100644 index 00000000..b871cae2 --- /dev/null +++ b/tests/generation_parameters/test_text_templates.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +from horde_sdk.generation_parameters.text.object_models import ( + BasicTextGenerationParameters, + BasicTextGenerationParametersTemplate, + TextGenerationParameters, + TextGenerationParametersTemplate, +) + + +def _create_basic_template(prompt: str) -> BasicTextGenerationParametersTemplate: + return BasicTextGenerationParametersTemplate( + prompt=prompt, + model="example-model", + ) + + +def test_text_template_to_parameters_applies_base_updates() -> None: + template = TextGenerationParametersTemplate( + base_params=_create_basic_template(prompt="initial"), + ) + + parameters = template.to_parameters(base_param_updates=BasicTextGenerationParametersTemplate(prompt="updated")) + + assert isinstance(parameters.base_params, BasicTextGenerationParameters) + assert parameters.base_params.prompt == "updated" + + +def test_text_template_to_parameters_reuses_existing_result_ids() -> None: + parameters = TextGenerationParameters( + base_params=BasicTextGenerationParameters( + prompt="kept", + model="example-model", + ), + result_ids=["existing-id"], + ) + + converted = parameters.to_parameters() + + assert converted.result_ids == ["existing-id"] + + +def test_text_parameters_accept_existing_instance_via_model_validate() -> None: + template = TextGenerationParametersTemplate( + base_params=_create_basic_template(prompt="validate"), + ) + + parameters = template.to_parameters() + clone = TextGenerationParameters.model_validate(parameters, from_attributes=True) + + assert clone.model_dump() == parameters.model_dump() diff --git a/tests/generation_parameters/test_utils.py b/tests/generation_parameters/test_utils.py new file mode 100644 index 00000000..4ca902b9 --- /dev/null +++ b/tests/generation_parameters/test_utils.py @@ -0,0 +1,180 @@ +from __future__ import annotations + +from enum import Enum +from uuid import UUID + +import pytest + +from horde_sdk.generation_parameters.generic import CompositeParametersBase +from horde_sdk.generation_parameters.utils import ( + ResultIdAllocator, + compute_parameter_fingerprint, + ensure_result_id, + ensure_result_ids, + finalize_template_for_parameters, + resolve_result_id_from_payload, + resolve_result_ids_from_payload, +) + + +class SampleEnum(Enum): + """Enum used to validate fingerprint normalisation.""" + + FIRST = "first" + SECOND = 2 + + +class _SimpleTemplate(CompositeParametersBase): + value: int = 1 + result_id: str | None = None + result_ids: list[str] | None = None + + def get_number_expected_results(self) -> int: # pragma: no cover - simple return + return 1 + + +def test_compute_parameter_fingerprint_deterministic_order() -> None: + """The fingerprint should stay stable regardless of mapping iteration order.""" + + payload_one = { + "list": [1, {"value": SampleEnum.FIRST}], + "mapping": {"alpha": True, "beta": 3}, + "number": 42, + } + payload_two = { + "number": 42, + "mapping": {"beta": 3, "alpha": True}, + "list": [1, {"value": SampleEnum.FIRST}], + } + + assert compute_parameter_fingerprint(payload_one) == compute_parameter_fingerprint(payload_two) + + +def test_result_id_allocator_includes_fingerprint() -> None: + """Allocator derives different identifiers when payload fingerprints diverge.""" + + allocator = ResultIdAllocator() + fingerprint_a = 
compute_parameter_fingerprint({"value": "alpha"}) + fingerprint_b = compute_parameter_fingerprint({"value": "beta"}) + + allocation_a = allocator.allocate(seed="example", index=0, fingerprint=fingerprint_a) + allocation_a_repeat = allocator.allocate(seed="example", index=0, fingerprint=fingerprint_a) + allocation_b = allocator.allocate(seed="example", index=0, fingerprint=fingerprint_b) + + assert allocation_a == allocation_a_repeat + assert allocation_a != allocation_b + assert UUID(allocation_a) + + +def test_ensure_result_ids_respects_existing_values() -> None: + """Existing identifiers should be returned untouched.""" + + allocator = ResultIdAllocator() + result = ensure_result_ids(["existing"], 1, allocator=allocator, seed="seed", fingerprint="fp") + assert result == ["existing"] + + +def test_ensure_result_ids_uses_fingerprint() -> None: + """Generated identifiers should differ when fingerprints do not match.""" + + allocator = ResultIdAllocator() + fingerprint_a = compute_parameter_fingerprint({"index": 1}) + fingerprint_b = compute_parameter_fingerprint({"index": 2}) + + ids_a = ensure_result_ids(None, 2, allocator=allocator, seed="seed", fingerprint=fingerprint_a) + ids_a_repeat = ensure_result_ids(None, 2, allocator=allocator, seed="seed", fingerprint=fingerprint_a) + ids_b = ensure_result_ids(None, 2, allocator=allocator, seed="seed", fingerprint=fingerprint_b) + + assert ids_a == ids_a_repeat + assert ids_a != ids_b + + +def test_ensure_result_ids_length_mismatch() -> None: + """Providing mismatched identifiers should raise an error.""" + + allocator = ResultIdAllocator() + + with pytest.raises(ValueError): + ensure_result_ids(["only-one"], 2, allocator=allocator, seed="seed", fingerprint="fp") + + +def test_ensure_result_id_uses_fingerprint() -> None: + """Single identifier allocation should also depend on fingerprints.""" + + allocator = ResultIdAllocator() + fingerprint_a = compute_parameter_fingerprint({"value": 1}) + fingerprint_b = compute_parameter_fingerprint({"value": 2}) + + result_a = ensure_result_id(None, allocator=allocator, seed="seed", fingerprint=fingerprint_a) + result_a_repeat = ensure_result_id(None, allocator=allocator, seed="seed", fingerprint=fingerprint_a) + result_b = ensure_result_id(None, allocator=allocator, seed="seed", fingerprint=fingerprint_b) + + assert result_a == result_a_repeat + assert result_a != result_b + + +def test_finalize_template_excludes_fields_from_fingerprint() -> None: + """Excluded fields must not influence the fingerprint.""" + + first_snapshot = finalize_template_for_parameters( + _SimpleTemplate(result_ids=["alpha"]), + fingerprint_exclude_fields=("result_ids",), + ) + second_snapshot = finalize_template_for_parameters( + _SimpleTemplate(result_ids=["beta"]), + fingerprint_exclude_fields=("result_ids",), + ) + + assert first_snapshot.fingerprint == second_snapshot.fingerprint + + +def test_resolve_result_ids_from_payload_prefers_explicit_values() -> None: + """Explicit identifiers should override payload-provided ones.""" + + payload_ids = ["payload"] + explicit_ids = ["explicit"] + fingerprint = compute_parameter_fingerprint({"value": 42}) + + resolved = resolve_result_ids_from_payload( + explicit_ids=explicit_ids, + payload_value=payload_ids, + count=1, + allocator=None, + seed="seed", + fingerprint=fingerprint, + ) + assert resolved == explicit_ids + + fallback = resolve_result_ids_from_payload( + explicit_ids=None, + payload_value=payload_ids, + count=1, + allocator=None, + seed="seed", + fingerprint=fingerprint, + ) + 
assert fallback == payload_ids + + +def test_resolve_result_id_from_payload_validates_types() -> None: + """Invalid payload identifiers should raise informative errors.""" + + fingerprint = compute_parameter_fingerprint({"value": "x"}) + + resolved = resolve_result_id_from_payload( + explicit_id=None, + payload_value="payload-id", + allocator=None, + seed="seed", + fingerprint=fingerprint, + ) + assert resolved == "payload-id" + + with pytest.raises(TypeError): + resolve_result_id_from_payload( + explicit_id=None, + payload_value=123, + allocator=None, + seed="seed", + fingerprint=fingerprint, + ) diff --git a/tests/test_data/ai_horde_api/example_payloads/_v2_generate_async_post.json b/tests/test_data/ai_horde_api/example_payloads/_v2_generate_async_post.json index 09c5584f..d8812815 100644 --- a/tests/test_data/ai_horde_api/example_payloads/_v2_generate_async_post.json +++ b/tests/test_data/ai_horde_api/example_payloads/_v2_generate_async_post.json @@ -1,7 +1,7 @@ { "prompt": "a", "params": { - "sampler_name": "k_euler", + "sampler_name": "k_dpm_2_a", "cfg_scale": 7.5, "denoising_strength": 0.75, "hires_fix_denoising_strength": 0.75, diff --git a/tests/test_data/ai_horde_api/example_payloads/_v2_generate_rate_id_post.json b/tests/test_data/ai_horde_api/example_payloads/_v2_generate_rate_id_post.json index 0d3a9a34..95b8eb5c 100644 --- a/tests/test_data/ai_horde_api/example_payloads/_v2_generate_rate_id_post.json +++ b/tests/test_data/ai_horde_api/example_payloads/_v2_generate_rate_id_post.json @@ -2,7 +2,7 @@ "best": "6038971e-f0b0-4fdd-a3bb-148f561f815e", "ratings": [ { - "id": "6038971e-f0b0-4fdd-a3bb-148f561f815e", + "id": "00000000-0000-0000-0000-000000000000", "rating": 1.0, "artifacts": 1 } diff --git a/tests/test_data/ai_horde_api/example_payloads/_v2_generate_text_async_post.json b/tests/test_data/ai_horde_api/example_payloads/_v2_generate_text_async_post.json index 129d637d..eadbd267 100644 --- a/tests/test_data/ai_horde_api/example_payloads/_v2_generate_text_async_post.json +++ b/tests/test_data/ai_horde_api/example_payloads/_v2_generate_text_async_post.json @@ -27,7 +27,7 @@ "dynatemp_range": 0.0, "dynatemp_exponent": 1.0, "n": 1, - "max_context_length": 1024, + "max_context_length": 2048, "max_length": 80 }, "softprompt": "a", diff --git a/tests/test_data/ai_horde_api/example_payloads/_v2_styles_image_post.json b/tests/test_data/ai_horde_api/example_payloads/_v2_styles_image_post.json index b95c443b..6f629872 100644 --- a/tests/test_data/ai_horde_api/example_payloads/_v2_styles_image_post.json +++ b/tests/test_data/ai_horde_api/example_payloads/_v2_styles_image_post.json @@ -3,12 +3,12 @@ "info": "photorealism excellence.", "prompt": "{p}{np}", "params": { - "sampler_name": "k_euler", - "cfg_scale": 7.5, + "sampler_name": "k_dpm_2_a", + "cfg_scale": 0.0, "denoising_strength": 0.75, "hires_fix_denoising_strength": 0.75, - "height": 512, - "width": 512, + "height": 64.0, + "width": 64.0, "post_processing": [ "GFPGAN" ], @@ -40,7 +40,7 @@ }, "workflow": "qr_code", "transparent": false, - "steps": 30 + "steps": 1.0 }, "public": true, "nsfw": false, @@ -49,5 +49,6 @@ ], "models": [ "stable_diffusion" - ] + ], + "sharedkey": "00000000-0000-0000-0000-000000000000" } diff --git a/tests/test_data/ai_horde_api/example_payloads/_v2_styles_image_style_id_patch.json b/tests/test_data/ai_horde_api/example_payloads/_v2_styles_image_style_id_patch.json index bb6b8059..a9e6ca65 100644 --- a/tests/test_data/ai_horde_api/example_payloads/_v2_styles_image_style_id_patch.json +++ 
b/tests/test_data/ai_horde_api/example_payloads/_v2_styles_image_style_id_patch.json @@ -3,12 +3,12 @@ "info": "photorealism excellence.", "prompt": "aaaaaaa", "params": { - "sampler_name": "k_euler", - "cfg_scale": 7.5, + "sampler_name": "k_dpm_2_a", + "cfg_scale": 0.0, "denoising_strength": 0.75, "hires_fix_denoising_strength": 0.75, - "height": 512, - "width": 512, + "height": 64.0, + "width": 64.0, "post_processing": [ "GFPGAN" ], @@ -40,7 +40,7 @@ }, "workflow": "qr_code", "transparent": false, - "steps": 30 + "steps": 1.0 }, "public": true, "nsfw": false, @@ -49,5 +49,6 @@ ], "models": [ "stable_diffusion" - ] + ], + "sharedkey": "00000000-0000-0000-0000-000000000000" } diff --git a/tests/test_data/ai_horde_api/example_payloads/_v2_users_user_id_put.json b/tests/test_data/ai_horde_api/example_payloads/_v2_users_user_id_put.json index e1b78660..31b9976c 100644 --- a/tests/test_data/ai_horde_api/example_payloads/_v2_users_user_id_put.json +++ b/tests/test_data/ai_horde_api/example_payloads/_v2_users_user_id_put.json @@ -14,6 +14,7 @@ "service": false, "education": false, "special": false, + "undelete": false, "filtered": false, "reset_suspicion": false, "contact": "email@example.com", diff --git a/tests/test_data/ai_horde_api/example_payloads/_v2_workers_messages_post.json b/tests/test_data/ai_horde_api/example_payloads/_v2_workers_messages_post.json new file mode 100644 index 00000000..c93526b0 --- /dev/null +++ b/tests/test_data/ai_horde_api/example_payloads/_v2_workers_messages_post.json @@ -0,0 +1,6 @@ +{ + "worker_id": "00000000-0000-0000-0000-000000000000", + "message": "Hello Worker!", + "origin": "AI Horde Moderators", + "expiry": 12 +} diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_collection_by_name_collection_name_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_collection_by_name_collection_name_get_200.json index 14c40bff..d4ae23b6 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_collection_by_name_collection_name_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_collection_by_name_collection_name_get_200.json @@ -1,5 +1,5 @@ { - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "name": "a", "type": "image", "info": "a", diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_collections_collection_id_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_collections_collection_id_get_200.json index 14c40bff..d4ae23b6 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_collections_collection_id_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_collections_collection_id_get_200.json @@ -1,5 +1,5 @@ { - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "name": "a", "type": "image", "info": "a", diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_collections_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_collections_get_200.json index 7bfacfd0..a7eed56c 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_collections_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_collections_get_200.json @@ -1,6 +1,6 @@ [ { - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "name": "a", "type": "image", "info": "a", diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_filters_filter_id_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_filters_filter_id_get_200.json index 5b99b750..5477fdd4 100644 --- 
a/tests/test_data/ai_horde_api/example_responses/_v2_filters_filter_id_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_filters_filter_id_get_200.json @@ -1,10 +1,8 @@ -[ - { - "id": "", - "regex": "ac.*", - "filter_type": 10, - "description": "", - "replacement": "", - "user": "" - } -] +{ + "id": "00000000-0000-0000-0000-000000000000", + "regex": "ac.*", + "filter_type": 10, + "description": "", + "replacement": "", + "user": "" +} diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_filters_filter_id_patch_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_filters_filter_id_patch_200.json index 518b5301..5477fdd4 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_filters_filter_id_patch_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_filters_filter_id_patch_200.json @@ -1,5 +1,5 @@ { - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "regex": "ac.*", "filter_type": 10, "description": "", diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_filters_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_filters_get_200.json index 5b99b750..bab82df9 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_filters_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_filters_get_200.json @@ -1,6 +1,6 @@ [ { - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "regex": "ac.*", "filter_type": 10, "description": "", diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_filters_put_201.json b/tests/test_data/ai_horde_api/example_responses/_v2_filters_put_201.json index 518b5301..5477fdd4 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_filters_put_201.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_filters_put_201.json @@ -1,5 +1,5 @@ { - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "regex": "ac.*", "filter_type": 10, "description": "", diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_find_user_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_find_user_get_200.json index a461c22c..39f68ee9 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_find_user_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_find_user_get_200.json @@ -52,6 +52,7 @@ "education": false, "customizer": false, "special": false, + "deleted": false, "suspicious": 0, "pseudonymous": false, "contact": "email@example.com", diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_generate_async_post_202.json b/tests/test_data/ai_horde_api/example_responses/_v2_generate_async_post_202.json index 9f0a5df9..a57a2134 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_generate_async_post_202.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_generate_async_post_202.json @@ -1,5 +1,5 @@ { - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "kudos": 0.0, "message": "", "warnings": [ diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_generate_pop_post_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_generate_pop_post_200.json index a608a733..d1b9af94 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_generate_pop_post_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_generate_pop_post_200.json @@ -1,6 +1,6 @@ { "payload": { - "sampler_name": "k_euler", + "sampler_name": "k_dpmpp_2m", "cfg_scale": 7.5, "denoising_strength": 0.75, "hires_fix_denoising_strength": 0.75, @@ -53,10 +53,18 @@ "n_iter": 1, 
"use_nsfw_censor": false }, - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "ids": [ "00000000-0000-0000-0000-000000000000" ], + "messages": [ + { + "id": "00000000-0000-0000-0000-000000000000", + "message": "Hello Worker!", + "origin": "moderator", + "expiry": "2021-01-01T00:00:00Z" + } + ], "ttl": 0, "skipped": { "worker_id": 0.0, diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_generate_status_id_delete_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_generate_status_id_delete_200.json index cee94c9b..211fd196 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_generate_status_id_delete_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_generate_status_id_delete_200.json @@ -11,7 +11,7 @@ "is_possible": true, "generations": [ { - "worker_id": "", + "worker_id": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "worker_name": "", "model": "", "state": "ok", diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_generate_status_id_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_generate_status_id_get_200.json index cee94c9b..211fd196 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_generate_status_id_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_generate_status_id_get_200.json @@ -11,7 +11,7 @@ "is_possible": true, "generations": [ { - "worker_id": "", + "worker_id": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "worker_name": "", "model": "", "state": "ok", diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_async_post_202.json b/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_async_post_202.json index 9f0a5df9..a57a2134 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_async_post_202.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_async_post_202.json @@ -1,5 +1,5 @@ { - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "kudos": 0.0, "message": "", "warnings": [ diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_pop_post_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_pop_post_200.json index dffb7e4a..a2b642d9 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_pop_post_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_pop_post_200.json @@ -26,14 +26,22 @@ "dynatemp_range": 0.0, "dynatemp_exponent": 1.0, "n": 1, - "max_context_length": 1024, + "max_context_length": 2048, "max_length": 80, "prompt": "" }, - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "ids": [ "00000000-0000-0000-0000-000000000000" ], + "messages": [ + { + "id": "00000000-0000-0000-0000-000000000000", + "message": "Hello Worker!", + "origin": "moderator", + "expiry": "2021-01-01T00:00:00Z" + } + ], "ttl": 0, "extra_source_images": [ { diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_status_id_delete_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_status_id_delete_200.json index acd42f58..ec316d1b 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_status_id_delete_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_status_id_delete_200.json @@ -11,7 +11,7 @@ "is_possible": true, "generations": [ { - "worker_id": "", + "worker_id": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "worker_name": "", "model": "", "state": "ok", diff --git 
a/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_status_id_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_status_id_get_200.json index acd42f58..ec316d1b 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_status_id_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_generate_text_status_id_get_200.json @@ -11,7 +11,7 @@ "is_possible": true, "generations": [ { - "worker_id": "", + "worker_id": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "worker_name": "", "model": "", "state": "ok", diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_interrogate_async_post_202.json b/tests/test_data/ai_horde_api/example_responses/_v2_interrogate_async_post_202.json index e7a3f041..ba14b07f 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_interrogate_async_post_202.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_interrogate_async_post_202.json @@ -1,4 +1,4 @@ { - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "message": "" } diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_interrogate_pop_post_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_interrogate_pop_post_200.json index 6efc014b..706bb93e 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_interrogate_pop_post_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_interrogate_pop_post_200.json @@ -1,7 +1,7 @@ { "forms": [ { - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "form": "caption", "payload": { "additionalProp1": "", diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_sharedkeys_put_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_sharedkeys_put_200.json index 4f99993c..0af264e7 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_sharedkeys_put_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_sharedkeys_put_200.json @@ -1,5 +1,5 @@ { - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "username": "", "name": "", "kudos": 0, diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_sharedkeys_sharedkey_id_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_sharedkeys_sharedkey_id_get_200.json index 4f99993c..0af264e7 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_sharedkeys_sharedkey_id_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_sharedkeys_sharedkey_id_get_200.json @@ -1,5 +1,5 @@ { - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "username": "", "name": "", "kudos": 0, diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_sharedkeys_sharedkey_id_patch_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_sharedkeys_sharedkey_id_patch_200.json index 4f99993c..0af264e7 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_sharedkeys_sharedkey_id_patch_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_sharedkeys_sharedkey_id_patch_200.json @@ -1,5 +1,5 @@ { - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "username": "", "name": "", "kudos": 0, diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_styles_image_by_name_style_name_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_styles_image_by_name_style_name_get_200.json index db63b5e3..ede87f77 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_styles_image_by_name_style_name_get_200.json +++ 
b/tests/test_data/ai_horde_api/example_responses/_v2_styles_image_by_name_style_name_get_200.json @@ -3,12 +3,12 @@ "info": "photorealism excellence.", "prompt": "{p}{np}", "params": { - "sampler_name": "k_euler", - "cfg_scale": 7.5, + "sampler_name": "k_dpmpp_2m", + "cfg_scale": 0.0, "denoising_strength": 0.75, "hires_fix_denoising_strength": 0.75, - "height": 512, - "width": 512, + "height": 64.0, + "width": 64.0, "post_processing": [ "GFPGAN" ], @@ -40,7 +40,7 @@ }, "workflow": "qr_code", "transparent": false, - "steps": 30 + "steps": 1.0 }, "public": true, "nsfw": false, @@ -50,6 +50,7 @@ "models": [ "stable_diffusion" ], + "sharedkey": "00000000-0000-0000-0000-000000000000", "id": "00000000-0000-0000-0000-000000000000", "use_count": 0, "creator": "db0#1", @@ -59,5 +60,16 @@ "primary": false, "id": "00000000-0000-0000-0000-000000000000" } - ] + ], + "shared_key": { + "id": "00000000-0000-0000-0000-000000000000", + "username": "", + "name": "", + "kudos": 0, + "expiry": "2021-01-01T00:00:00Z", + "utilized": 0, + "max_image_pixels": 0, + "max_image_steps": 0, + "max_text_tokens": 0 + } } diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_styles_image_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_styles_image_get_200.json index d6b0dd46..0bb8835f 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_styles_image_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_styles_image_get_200.json @@ -4,12 +4,12 @@ "info": "photorealism excellence.", "prompt": "{p}{np}", "params": { - "sampler_name": "k_euler", - "cfg_scale": 7.5, + "sampler_name": "k_dpmpp_2m", + "cfg_scale": 0.0, "denoising_strength": 0.75, "hires_fix_denoising_strength": 0.75, - "height": 512, - "width": 512, + "height": 64.0, + "width": 64.0, "post_processing": [ "GFPGAN" ], @@ -41,7 +41,7 @@ }, "workflow": "qr_code", "transparent": false, - "steps": 30 + "steps": 1.0 }, "public": true, "nsfw": false, @@ -51,6 +51,7 @@ "models": [ "stable_diffusion" ], + "sharedkey": "00000000-0000-0000-0000-000000000000", "id": "00000000-0000-0000-0000-000000000000", "use_count": 0, "creator": "db0#1", @@ -60,6 +61,17 @@ "primary": false, "id": "00000000-0000-0000-0000-000000000000" } - ] + ], + "shared_key": { + "id": "00000000-0000-0000-0000-000000000000", + "username": "", + "name": "", + "kudos": 0, + "expiry": "2021-01-01T00:00:00Z", + "utilized": 0, + "max_image_pixels": 0, + "max_image_steps": 0, + "max_text_tokens": 0 + } } ] diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_styles_image_style_id_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_styles_image_style_id_get_200.json index db63b5e3..ede87f77 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_styles_image_style_id_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_styles_image_style_id_get_200.json @@ -3,12 +3,12 @@ "info": "photorealism excellence.", "prompt": "{p}{np}", "params": { - "sampler_name": "k_euler", - "cfg_scale": 7.5, + "sampler_name": "k_dpmpp_2m", + "cfg_scale": 0.0, "denoising_strength": 0.75, "hires_fix_denoising_strength": 0.75, - "height": 512, - "width": 512, + "height": 64.0, + "width": 64.0, "post_processing": [ "GFPGAN" ], @@ -40,7 +40,7 @@ }, "workflow": "qr_code", "transparent": false, - "steps": 30 + "steps": 1.0 }, "public": true, "nsfw": false, @@ -50,6 +50,7 @@ "models": [ "stable_diffusion" ], + "sharedkey": "00000000-0000-0000-0000-000000000000", "id": "00000000-0000-0000-0000-000000000000", "use_count": 0, "creator": 
"db0#1", @@ -59,5 +60,16 @@ "primary": false, "id": "00000000-0000-0000-0000-000000000000" } - ] + ], + "shared_key": { + "id": "00000000-0000-0000-0000-000000000000", + "username": "", + "name": "", + "kudos": 0, + "expiry": "2021-01-01T00:00:00Z", + "utilized": 0, + "max_image_pixels": 0, + "max_image_steps": 0, + "max_text_tokens": 0 + } } diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_teams_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_teams_get_200.json index 8dc89dc1..cdebbe9d 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_teams_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_teams_get_200.json @@ -1,7 +1,7 @@ [ { "name": "", - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "info": "Anarchy is emergent order.", "requests_fulfilled": 0, "kudos": 0.0, @@ -12,7 +12,7 @@ { "type": "image", "name": "", - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "online": false } ], diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_teams_team_id_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_teams_team_id_get_200.json index 15d7aff9..746316a0 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_teams_team_id_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_teams_team_id_get_200.json @@ -1,6 +1,6 @@ { "name": "", - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "info": "Anarchy is emergent order.", "requests_fulfilled": 0, "kudos": 0.0, @@ -11,7 +11,7 @@ { "type": "image", "name": "", - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "online": false } ], diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_users_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_users_get_200.json index 1de1f075..5f0e82b1 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_users_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_users_get_200.json @@ -53,6 +53,7 @@ "education": false, "customizer": false, "special": false, + "deleted": false, "suspicious": 0, "pseudonymous": false, "contact": "email@example.com", diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_users_user_id_delete_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_users_user_id_delete_200.json new file mode 100644 index 00000000..3a3319e6 --- /dev/null +++ b/tests/test_data/ai_horde_api/example_responses/_v2_users_user_id_delete_200.json @@ -0,0 +1,3 @@ +{ + "message": "OK" +} diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_users_user_id_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_users_user_id_get_200.json index a461c22c..39f68ee9 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_users_user_id_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_users_user_id_get_200.json @@ -52,6 +52,7 @@ "education": false, "customizer": false, "special": false, + "deleted": false, "suspicious": 0, "pseudonymous": false, "contact": "email@example.com", diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_users_user_id_put_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_users_user_id_put_200.json index 2560d2c2..bc112fec 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_users_user_id_put_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_users_user_id_put_200.json @@ -14,6 +14,7 @@ "service": false, "education": false, "special": false, + "undeleted": false, "new_suspicion": 0, 
"contact": "email@example.com", "admin_comment": "User is sus" diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_workers_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_workers_get_200.json index 41eba74f..4afecd61 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_workers_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_workers_get_200.json @@ -2,7 +2,7 @@ { "type": "image", "name": "", - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "online": false, "requests_fulfilled": 0, "kudos_rewards": 0.0, @@ -31,7 +31,7 @@ ], "team": { "name": "", - "id": "" + "id": "00000000-0000-0000-0000-000000000000" }, "contact": "email@example.com", "bridge_agent": "AI Horde Worker reGen:4.1.0:https://github.com/Haidra-Org/horde-worker-reGen", @@ -45,6 +45,14 @@ "sdxl_controlnet": false, "max_length": 80, "max_context_length": 80, - "tokens_generated": 0.0 + "tokens_generated": 0.0, + "messages": [ + { + "worker_id": "00000000-0000-0000-0000-000000000000", + "message": "Hello Worker!", + "origin": "AI Horde Moderators", + "expiry": 12 + } + ] } ] diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_workers_messages_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_workers_messages_get_200.json new file mode 100644 index 00000000..a72bf510 --- /dev/null +++ b/tests/test_data/ai_horde_api/example_responses/_v2_workers_messages_get_200.json @@ -0,0 +1,8 @@ +[ + { + "worker_id": "00000000-0000-0000-0000-000000000000", + "message": "Hello Worker!", + "origin": "AI Horde Moderators", + "expiry": 12 + } +] diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_workers_messages_message_id_delete_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_workers_messages_message_id_delete_200.json new file mode 100644 index 00000000..3a3319e6 --- /dev/null +++ b/tests/test_data/ai_horde_api/example_responses/_v2_workers_messages_message_id_delete_200.json @@ -0,0 +1,3 @@ +{ + "message": "OK" +} diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_workers_messages_message_id_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_workers_messages_message_id_get_200.json new file mode 100644 index 00000000..c93526b0 --- /dev/null +++ b/tests/test_data/ai_horde_api/example_responses/_v2_workers_messages_message_id_get_200.json @@ -0,0 +1,6 @@ +{ + "worker_id": "00000000-0000-0000-0000-000000000000", + "message": "Hello Worker!", + "origin": "AI Horde Moderators", + "expiry": 12 +} diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_workers_messages_post_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_workers_messages_post_200.json new file mode 100644 index 00000000..c93526b0 --- /dev/null +++ b/tests/test_data/ai_horde_api/example_responses/_v2_workers_messages_post_200.json @@ -0,0 +1,6 @@ +{ + "worker_id": "00000000-0000-0000-0000-000000000000", + "message": "Hello Worker!", + "origin": "AI Horde Moderators", + "expiry": 12 +} diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_workers_name_worker_name_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_workers_name_worker_name_get_200.json index 26bdee0a..5500b42e 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_workers_name_worker_name_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_workers_name_worker_name_get_200.json @@ -1,7 +1,7 @@ { "type": "image", "name": "", - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "online": false, 
"requests_fulfilled": 0, "kudos_rewards": 0.0, @@ -30,7 +30,7 @@ ], "team": { "name": "", - "id": "" + "id": "00000000-0000-0000-0000-000000000000" }, "contact": "email@example.com", "bridge_agent": "AI Horde Worker reGen:4.1.0:https://github.com/Haidra-Org/horde-worker-reGen", @@ -44,5 +44,13 @@ "sdxl_controlnet": false, "max_length": 80, "max_context_length": 80, - "tokens_generated": 0.0 + "tokens_generated": 0.0, + "messages": [ + { + "worker_id": "00000000-0000-0000-0000-000000000000", + "message": "Hello Worker!", + "origin": "AI Horde Moderators", + "expiry": 12 + } + ] } diff --git a/tests/test_data/ai_horde_api/example_responses/_v2_workers_worker_id_get_200.json b/tests/test_data/ai_horde_api/example_responses/_v2_workers_worker_id_get_200.json index 26bdee0a..5500b42e 100644 --- a/tests/test_data/ai_horde_api/example_responses/_v2_workers_worker_id_get_200.json +++ b/tests/test_data/ai_horde_api/example_responses/_v2_workers_worker_id_get_200.json @@ -1,7 +1,7 @@ { "type": "image", "name": "", - "id": "", + "id": "00000000-0000-0000-0000-000000000000", "online": false, "requests_fulfilled": 0, "kudos_rewards": 0.0, @@ -30,7 +30,7 @@ ], "team": { "name": "", - "id": "" + "id": "00000000-0000-0000-0000-000000000000" }, "contact": "email@example.com", "bridge_agent": "AI Horde Worker reGen:4.1.0:https://github.com/Haidra-Org/horde-worker-reGen", @@ -44,5 +44,13 @@ "sdxl_controlnet": false, "max_length": 80, "max_context_length": 80, - "tokens_generated": 0.0 + "tokens_generated": 0.0, + "messages": [ + { + "worker_id": "00000000-0000-0000-0000-000000000000", + "message": "Hello Worker!", + "origin": "AI Horde Moderators", + "expiry": 12 + } + ] } diff --git a/tests/test_data/images/test_facefix.png b/tests/test_data/images/test_facefix.png new file mode 100644 index 00000000..8309cf33 Binary files /dev/null and b/tests/test_data/images/test_facefix.png differ diff --git a/tests/test_data/images/test_img2img_alpha.png b/tests/test_data/images/test_img2img_alpha.png new file mode 100644 index 00000000..a755408a Binary files /dev/null and b/tests/test_data/images/test_img2img_alpha.png differ diff --git a/tests/test_data/images/test_inpaint_alpha.png b/tests/test_data/images/test_inpaint_alpha.png new file mode 100644 index 00000000..a755408a Binary files /dev/null and b/tests/test_data/images/test_inpaint_alpha.png differ diff --git a/tests/test_data/images/test_inpaint_mask.png b/tests/test_data/images/test_inpaint_mask.png new file mode 100644 index 00000000..8b155ece Binary files /dev/null and b/tests/test_data/images/test_inpaint_mask.png differ diff --git a/tests/test_data/images/test_inpaint_original.png b/tests/test_data/images/test_inpaint_original.png new file mode 100644 index 00000000..4acaeb29 Binary files /dev/null and b/tests/test_data/images/test_inpaint_original.png differ diff --git a/tests/test_data/images/test_openpose_control_map.png b/tests/test_data/images/test_openpose_control_map.png new file mode 100644 index 00000000..9112d65d Binary files /dev/null and b/tests/test_data/images/test_openpose_control_map.png differ diff --git a/tests/test_data/images/test_outpaint.png b/tests/test_data/images/test_outpaint.png new file mode 100644 index 00000000..ecbf9ccd Binary files /dev/null and b/tests/test_data/images/test_outpaint.png differ diff --git a/tests/test_dynamically_check_apimodels.py b/tests/test_dynamically_check_apimodels.py index 4eeeef88..8586d979 100644 --- a/tests/test_dynamically_check_apimodels.py +++ b/tests/test_dynamically_check_apimodels.py @@ 
-4,6 +4,7 @@ import os from pathlib import Path from types import ModuleType +from typing import Any import pytest from loguru import logger @@ -14,6 +15,7 @@ from horde_sdk.generic_api._reflection import get_all_request_types from horde_sdk.generic_api.apimodels import HordeRequest, HordeResponse from horde_sdk.generic_api.decoration import is_unhashable +from horde_sdk.generic_api.metadata import GenericPathFields from horde_sdk.generic_api.utils.swagger import SwaggerDoc EXAMPLE_PAYLOADS: dict[ModuleType, Path] = { @@ -73,13 +75,18 @@ def test_get_all_request_types(self) -> None: ), f"Response type is not a subclass of `HordeResponse`: {request_type}" @staticmethod - def dynamic_json_load(module: ModuleType) -> None: + def dynamic_json_load( + module: ModuleType, + path_data_type: type[GenericPathFields], + path_data_defaults: dict[GenericPathFields, str], + ) -> None: """Attempts to create instances of all non-abstract children of `RequestBase`.""" # Get the name of the module being tested. module_name = module.__name__ # Get a list of all non-abstract request types in the module. all_request_types: list[type[HordeRequest]] = get_all_request_types(module_name) + all_request_exceptions: list[Exception] = [] # Loop through each request type and test it. for request_type in all_request_types: @@ -109,6 +116,47 @@ def dynamic_json_load(module: ModuleType) -> None: target_payload_file_path, ), f"Missing example payload file: {target_payload_file_path}" + sample_data_json: dict[str, Any] = {} + + with open(target_payload_file_path, encoding="utf-8") as sample_file_handle: + sample_data_json = json.loads(sample_file_handle.read()) + + for field_name, field_info in request_type.model_fields.items(): + if field_name in path_data_type.__members__: + parsed_path_data = path_data_type(field_info.alias or field_name) + if parsed_path_data in path_data_defaults: + sample_data_json[field_info.alias or field_name] = path_data_defaults[parsed_path_data] + + try: + parsed_request_model = request_type.model_validate(sample_data_json) + try: + if is_unhashable(parsed_request_model): + logger.debug(f"Unhashable model for {target_payload_file_path}") + else: + try: + hash(parsed_request_model) + except Exception as e: + print(f"Failed to hash {target_payload_file_path}") + print(f"Error: {e}") + all_request_exceptions.append( + Exception( + f"Failed to hash {request_type} {target_payload_file_path}: {e}", + ), + ) + + except NotImplementedError: + logger.debug(f"Hashing not implemented for {target_payload_file_path}") + except Exception as e: + print(f"Failed to hash {target_payload_file_path}") + print(f"Error: {e}") + all_request_exceptions.append(e) + except Exception as e: + print(f"Failed to validate {target_payload_file_path}") + print(f"Request type: {request_type}") + print(f"Error: {e}") + print(f"Sample data: {sample_data_json}") + all_request_exceptions.append(e) + # Loop through each success status code and test the corresponding success response type. 
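+ # Each request type maps its documented success status codes to response models;
+ # the loop below loads the matching example (and, where present, production) response
+ # JSON for every pair, validates it against the declared response model, and then
+ # checks that the parsed response can be hashed unless it is marked unhashable.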
success_status_codes = request_type.get_success_status_response_pairs() for success_status_code, success_response_type in success_status_codes.items(): @@ -141,6 +189,8 @@ def dynamic_json_load(module: ModuleType) -> None: raise e except Exception as e: print(f"Failed to validate {example_response_file_path}") + print(f"Request type: {request_type}") + print(f"Response type: {success_response_type}") print(f"Error: {e}") print(f"Sample data: {sample_data_json}") raise e @@ -166,6 +216,8 @@ def dynamic_json_load(module: ModuleType) -> None: raise e except Exception as e: print(f"Failed to validate {production_response_file_path}") + print(f"Request type: {request_type}") + print(f"Response type: {success_response_type}") print(f"Error: {e}") print(f"Sample data: {sample_data_json}") raise e @@ -188,13 +240,24 @@ def dynamic_json_load(module: ModuleType) -> None: logger.debug(f"Hashing not implemented for {example_response_file_path}") except Exception as e: print(f"Failed to hash {example_response_file_path}") + print(f"Request type: {request_type}") + print(f"Response type: {success_response_type}") print(f"Error: {e}") raise e + if all_request_exceptions: + print("*" * 80) + for exception in all_request_exceptions: + print(f"Error: {exception}") + print("-" * 80) + raise Exception("Failed to validate all request types") + @pytest.mark.object_verify def test_horde_api(self) -> None: """Test all models in the `horde_sdk.ai_horde_api.apimodels` module can be instantiated from example JSON.""" - self.dynamic_json_load(horde_sdk.ai_horde_api.apimodels) + from horde_sdk.ai_horde_api.metadata import AIHordePathData, _default_path_values + + self.dynamic_json_load(horde_sdk.ai_horde_api.apimodels, AIHordePathData, _default_path_values) # def test_ratings_api(self) -> None: # self.dynamic_json_load(horde_sdk.ratings_api.apimodels) diff --git a/tests/test_verify_api_surface.py b/tests/test_verify_api_surface.py index 1757145b..36eb6e3c 100644 --- a/tests/test_verify_api_surface.py +++ b/tests/test_verify_api_surface.py @@ -1,4 +1,5 @@ import pytest +from loguru import logger @pytest.mark.object_verify @@ -53,9 +54,9 @@ def test_all_ai_horde_api_models_defined() -> None: if key in undefined_classes: undefined_classes.remove(key) - assert ( - "GenerationInputStable" not in undefined_classes - ), "A model which is known to be defined in the SDK was not found. Something critically bad has happened." + assert "GenerationInputStable" not in undefined_classes, ( + "A model which is known to be defined in the SDK was not found. Something critically bad has happened." + ) # Pretty print the undefined classes sorted by dict values, NOT by keys import json @@ -73,8 +74,38 @@ def test_all_ai_horde_api_models_defined() -> None: undefined_classes_sorted = sorted(undefined_classes) print(json.dumps(undefined_classes_sorted, indent=4)) + skipped_classes = [ # FIXME + "ModelPayloadStable", + "WorkerDetailsLite", + "ModelPayloadStyleKobold", + "ModelStyleInputParamsStableNoDefaults", + "SinglePeriodTxtModelStats", + "ModelPayloadRootKobold", + "ModelGenerationInputKobold", + "SinglePeriodImgModelStats", + "ModelSpecialPayloadStable", + "ModelPayloadKobold", + "InterrogationFormResult", + "GenerationMetadataKobold", + "InterrogationFormStatus", + "SubmitInput", + "UserActiveGenerations", + "ModelInterrogationFormStable", + "ModelPayloadStyleStable", + "ResponseModelMessagePop", + ] + """Many of these classes are abstractions or mislabeled and their absence in the SDK is an implementation detail. 
+ While they may later be added, the intent of this test is to detect *new* unsupported classes. + """ + + logger.warning(f"Skipped classes: {skipped_classes}") # TODO + + for skipped_class in skipped_classes: + if skipped_class in undefined_classes: + undefined_classes.remove(skipped_class) + assert not undefined_classes, ( - "The following models are defined in the API but not in the SDK: " f"{undefined_classes}" + f"The following models are defined in the API but not in the SDK: {undefined_classes}" ) @@ -85,9 +116,9 @@ def test_all_ai_horde_api_models_defined_for_endpoints() -> None: undefined_classes = all_undefined_classes_for_endpoints(horde_sdk.ai_horde_api.apimodels) - assert ( - "GenerationInputStable" not in undefined_classes - ), "A model which is known to be defined in the SDK was not found. Something critically bad has happened." + assert "GenerationInputStable" not in undefined_classes, ( + "A model which is known to be defined in the SDK was not found. Something critically bad has happened." + ) # Pretty print the undefined classes sorted by dict values, NOT by keys import json @@ -106,7 +137,7 @@ def test_all_ai_horde_api_models_defined_for_endpoints() -> None: print(json.dumps(undefined_classes_sorted, indent=4)) assert not undefined_classes, ( - "The following models are defined in the API but not in the SDK: " f"{undefined_classes}" + f"The following models are defined in the API but not in the SDK: {undefined_classes}" ) @@ -117,7 +148,7 @@ def test_all_ai_horde_endpoints_known() -> None: unknown_endpoints = all_unknown_endpoints_ai_horde() assert not unknown_endpoints, ( - "The following endpoints are defined in the API but not in the SDK: " f"{unknown_endpoints}" + f"The following endpoints are defined in the API but not in the SDK: {unknown_endpoints}" ) @@ -138,7 +169,7 @@ def test_all_ai_horde_endpoints_addressed() -> None: print(f"Unaddressed path: {unaddressed_endpoint}.") assert not unaddressed_endpoints, ( - "The following endpoints are defined in the API but not in the SDK: " f"{unaddressed_endpoints}" + f"The following endpoints are defined in the API but not in the SDK: {unaddressed_endpoints}" ) @@ -171,4 +202,54 @@ def test_all_models_have_docstrings() -> None: stringified_missing_docstrings = {k.__name__: list(v) for k, v in missing_docstrings.items()} jsonified_missing_docstrings = json.dumps(stringified_missing_docstrings, indent=4) - assert not missing_docstrings, "The following models are missing docstrings: " f"{jsonified_missing_docstrings}" + assert not missing_docstrings, f"The following models are missing docstrings: {jsonified_missing_docstrings}" + + +@pytest.mark.object_verify +def test_all_models_non_conforming_docstrings() -> None: + import horde_sdk.meta + + non_conforming_docstrings = horde_sdk.meta.all_models_non_conforming_docstrings() + + import json + + stringified_non_conforming_docstrings = {k.__name__: v for k, v in non_conforming_docstrings.items()} + jsonified_non_conforming_docstrings = json.dumps(stringified_non_conforming_docstrings, indent=4) + + map_to_dump: dict[str, dict[str, str]] = {} + missing_original_docstrings: dict[str, dict[str, str]] = {} + + for model, docstrings in non_conforming_docstrings.items(): + original_docstring, new_docstring = docstrings + + if original_docstring is None: + missing_original_docstrings[model.__name__] = { + "new": new_docstring or "", + } + continue + + if original_docstring: + map_to_dump[model.__name__] = { + "original": original_docstring, + "new": new_docstring or "", + } + else: + 
map_to_dump[model.__name__] = { + "new": new_docstring or "", + } + + if len(map_to_dump) > 0: + with open("non_conforming_docstrings.json", "w", encoding="utf-8") as f: + json.dump(map_to_dump, f, indent=4) + f.write("\n") + + if len(missing_original_docstrings) > 0: + with open("missing_original_docstrings.json", "w", encoding="utf-8") as f: + json.dump(missing_original_docstrings, f, indent=4) + f.write("\n") + + assert not non_conforming_docstrings, ( + "The following models have non-conforming docstrings: " + f"{jsonified_non_conforming_docstrings}" + "\n\nSee `non_conforming_docstrings.json` and `missing_original_docstrings.json` for more details." + ) diff --git a/tests/worker/__init__.py b/tests/worker/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/worker/conftest.py b/tests/worker/conftest.py new file mode 100644 index 00000000..25ca3450 --- /dev/null +++ b/tests/worker/conftest.py @@ -0,0 +1,14 @@ +import pytest + +from horde_sdk.worker.generations import ImageSingleGeneration +from horde_sdk.worker.jobs import ImageWorkerJob + + +@pytest.fixture(scope="function") +def simple_image_worker_job( + simple_image_generation: ImageSingleGeneration, +) -> ImageWorkerJob: + """Fixture providing a simple ImageWorkerJob for testing.""" + return ImageWorkerJob( + generation=simple_image_generation, + ) diff --git a/tests/worker/dispatch/__init__.py b/tests/worker/dispatch/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/worker/dispatch/ai_horde/__init__.py b/tests/worker/dispatch/ai_horde/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/worker/dispatch/ai_horde/test_alchemy_parameters.py b/tests/worker/dispatch/ai_horde/test_alchemy_parameters.py new file mode 100644 index 00000000..f527879e --- /dev/null +++ b/tests/worker/dispatch/ai_horde/test_alchemy_parameters.py @@ -0,0 +1,259 @@ +from horde_sdk.ai_horde_api.apimodels import ( + AlchemyJobPopResponse, + NoValidAlchemyFound, +) +from horde_sdk.consts import KNOWN_DISPATCH_SOURCE, KNOWN_NSFW_DETECTOR +from horde_sdk.generation_parameters.alchemy import ( + AlchemyParameters, + CaptionAlchemyParameters, + FacefixAlchemyParameters, + InterrogateAlchemyParameters, + NSFWAlchemyParameters, + SingleAlchemyParameters, + UpscaleAlchemyParameters, +) +from horde_sdk.generation_parameters.alchemy.consts import ( + KNOWN_ALCHEMY_FORMS, + KNOWN_CAPTION_MODELS, + KNOWN_FACEFIXERS, + KNOWN_INTERROGATORS, + KNOWN_MISC_POST_PROCESSORS, + KNOWN_UPSCALERS, +) +from horde_sdk.worker.consts import REQUESTED_SOURCE_IMAGE_FALLBACK_CHOICE +from horde_sdk.worker.dispatch.ai_horde.alchemy.convert import ( + convert_alchemy_job_pop_response_to_parameters, +) +from horde_sdk.worker.dispatch.ai_horde_parameters import AIHordeR2DispatchParameters + + +def assert_common_parameters( + generation_parameters: AlchemyParameters, + dispatch_parameters: AIHordeR2DispatchParameters, + api_response: AlchemyJobPopResponse, +) -> None: + """Assert that the common parameters are correct.""" + assert dispatch_parameters.dispatch_source == KNOWN_DISPATCH_SOURCE.AI_HORDE_API_OFFICIAL + assert dispatch_parameters.ttl is None + + assert isinstance(generation_parameters, AlchemyParameters) + assert isinstance(dispatch_parameters, AIHordeR2DispatchParameters) + + assert api_response.skipped == NoValidAlchemyFound() + assert api_response.forms is not None + + assert dispatch_parameters.source_image_fallback_choice == REQUESTED_SOURCE_IMAGE_FALLBACK_CHOICE.ABANDON + + for generation_id in 
api_response.ids: + assert any(str(generation_id) == x.result_id for x in generation_parameters.all_alchemy_operations) # FIXME + + for single_generation in generation_parameters.all_alchemy_operations: + assert isinstance(single_generation, SingleAlchemyParameters) + + if isinstance(single_generation, UpscaleAlchemyParameters): + assert single_generation.form == KNOWN_ALCHEMY_FORMS.post_process + assert ( + single_generation.upscaler in KNOWN_UPSCALERS.__members__ + or single_generation.upscaler in KNOWN_UPSCALERS.__members__.values() + ) + elif isinstance(single_generation, FacefixAlchemyParameters): + assert single_generation.form == KNOWN_ALCHEMY_FORMS.post_process + assert ( + single_generation.facefixer in KNOWN_FACEFIXERS.__members__ + or single_generation.facefixer in KNOWN_FACEFIXERS.__members__.values() + ) + elif isinstance(single_generation, InterrogateAlchemyParameters): + assert single_generation.form == KNOWN_ALCHEMY_FORMS.interrogation + assert ( + single_generation.interrogator in KNOWN_INTERROGATORS.__members__ + or single_generation.interrogator in KNOWN_INTERROGATORS.__members__.values() + ) + elif isinstance(single_generation, CaptionAlchemyParameters): + assert single_generation.form == KNOWN_ALCHEMY_FORMS.caption + assert ( + single_generation.caption_model in KNOWN_CAPTION_MODELS.__members__ + or single_generation.caption_model in KNOWN_CAPTION_MODELS.__members__.values() + ) + elif isinstance(single_generation, NSFWAlchemyParameters): + assert single_generation.form == KNOWN_ALCHEMY_FORMS.nsfw + assert ( + single_generation.nsfw_detector in KNOWN_NSFW_DETECTOR.__members__ + or single_generation.nsfw_detector in KNOWN_NSFW_DETECTOR.__members__.values() + ) + + +def test_convert_alchemy_job_pop_response_to_parameters_interrogate( + simple_alchemy_gen_job_pop_response_interrogate: AlchemyJobPopResponse, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters.""" + assert simple_alchemy_gen_job_pop_response_interrogate.forms is not None + assert len(simple_alchemy_gen_job_pop_response_interrogate.forms) == 1 + + generation_parameters, dispatch_parameters = convert_alchemy_job_pop_response_to_parameters( + simple_alchemy_gen_job_pop_response_interrogate, + ) + + assert len(generation_parameters.all_alchemy_operations) == 1 + + assert_common_parameters( + generation_parameters, + dispatch_parameters, + simple_alchemy_gen_job_pop_response_interrogate, + ) + + assert isinstance(generation_parameters.all_alchemy_operations[0], InterrogateAlchemyParameters) + + +def test_convert_alchemy_job_pop_response_to_parameters_nsfw_detect( + simple_alchemy_gen_job_pop_response_nsfw_detect: AlchemyJobPopResponse, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters.""" + assert simple_alchemy_gen_job_pop_response_nsfw_detect.forms is not None + assert len(simple_alchemy_gen_job_pop_response_nsfw_detect.forms) == 1 + + generation_parameters, dispatch_parameters = convert_alchemy_job_pop_response_to_parameters( + simple_alchemy_gen_job_pop_response_nsfw_detect, + ) + + assert len(generation_parameters.all_alchemy_operations) == 1 + + assert_common_parameters( + generation_parameters, + dispatch_parameters, + simple_alchemy_gen_job_pop_response_nsfw_detect, + ) + + assert isinstance(generation_parameters.all_alchemy_operations[0], NSFWAlchemyParameters) + + +def test_convert_alchemy_job_pop_response_to_parameters_all_feature_extractions( + simple_alchemy_gen_job_pop_response_all_feature_extractions: 
AlchemyJobPopResponse, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters.""" + assert simple_alchemy_gen_job_pop_response_all_feature_extractions.forms is not None + + generation_parameters, dispatch_parameters = convert_alchemy_job_pop_response_to_parameters( + simple_alchemy_gen_job_pop_response_all_feature_extractions, + ) + + assert_common_parameters( + generation_parameters, + dispatch_parameters, + simple_alchemy_gen_job_pop_response_all_feature_extractions, + ) + + all_types = [type(x) for x in generation_parameters.all_alchemy_operations] + + assert InterrogateAlchemyParameters in all_types + assert CaptionAlchemyParameters in all_types + assert NSFWAlchemyParameters in all_types + + +def test_convert_alchemy_job_pop_response_to_parameters_upscale( + simple_alchemy_gen_job_pop_response_upscale: AlchemyJobPopResponse, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters.""" + assert simple_alchemy_gen_job_pop_response_upscale.forms is not None + assert len(simple_alchemy_gen_job_pop_response_upscale.forms) == 1 + + generation_parameters, dispatch_parameters = convert_alchemy_job_pop_response_to_parameters( + simple_alchemy_gen_job_pop_response_upscale, + ) + + assert len(generation_parameters.all_alchemy_operations) == 1 + + assert_common_parameters( + generation_parameters, + dispatch_parameters, + simple_alchemy_gen_job_pop_response_upscale, + ) + + assert isinstance(generation_parameters.all_alchemy_operations[0], UpscaleAlchemyParameters) + + +def test_convert_alchemy_job_pop_response_to_parameters_upscale_multiple( + simple_alchemy_gen_job_pop_response_upscale_multiple: AlchemyJobPopResponse, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters.""" + assert simple_alchemy_gen_job_pop_response_upscale_multiple.forms is not None + assert len(simple_alchemy_gen_job_pop_response_upscale_multiple.forms) == 2 + + generation_parameters, dispatch_parameters = convert_alchemy_job_pop_response_to_parameters( + simple_alchemy_gen_job_pop_response_upscale_multiple, + ) + + assert len(generation_parameters.all_alchemy_operations) == 2 + + assert_common_parameters( + generation_parameters, + dispatch_parameters, + simple_alchemy_gen_job_pop_response_upscale_multiple, + ) + + assert all(type(x) is UpscaleAlchemyParameters for x in generation_parameters.all_alchemy_operations) + + +def test_convert_alchemy_job_pop_response_to_parameters_facefix( + simple_alchemy_gen_job_pop_response_facefix: AlchemyJobPopResponse, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters.""" + assert simple_alchemy_gen_job_pop_response_facefix.forms is not None + assert len(simple_alchemy_gen_job_pop_response_facefix.forms) == 1 + + generation_parameters, dispatch_parameters = convert_alchemy_job_pop_response_to_parameters( + simple_alchemy_gen_job_pop_response_facefix, + ) + + assert len(generation_parameters.all_alchemy_operations) == 1 + + assert_common_parameters( + generation_parameters, + dispatch_parameters, + simple_alchemy_gen_job_pop_response_facefix, + ) + + assert isinstance(generation_parameters.all_alchemy_operations[0], FacefixAlchemyParameters) + + +def test_convert_alchemy_job_pop_response_to_parameters_facefix_multiple( + simple_alchemy_gen_job_pop_response_facefix_multiple: AlchemyJobPopResponse, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters.""" + assert 
simple_alchemy_gen_job_pop_response_facefix_multiple.forms is not None + assert len(simple_alchemy_gen_job_pop_response_facefix_multiple.forms) == 2 + + generation_parameters, dispatch_parameters = convert_alchemy_job_pop_response_to_parameters( + simple_alchemy_gen_job_pop_response_facefix_multiple, + ) + + assert len(generation_parameters.all_alchemy_operations) == 2 + + assert_common_parameters( + generation_parameters, + dispatch_parameters, + simple_alchemy_gen_job_pop_response_facefix_multiple, + ) + + assert all(type(x) is FacefixAlchemyParameters for x in generation_parameters.all_alchemy_operations) + + +def test_convert_alchemy_job_pop_response_to_parameters_strip_background( + simple_alchemy_gen_job_pop_response_strip_background: AlchemyJobPopResponse, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters.""" + assert simple_alchemy_gen_job_pop_response_strip_background.forms is not None + assert len(simple_alchemy_gen_job_pop_response_strip_background.forms) == 1 + + generation_parameters, dispatch_parameters = convert_alchemy_job_pop_response_to_parameters( + simple_alchemy_gen_job_pop_response_strip_background, + ) + + assert len(generation_parameters.all_alchemy_operations) == 1 + + assert_common_parameters( + generation_parameters, + dispatch_parameters, + simple_alchemy_gen_job_pop_response_strip_background, + ) + + assert generation_parameters.all_alchemy_operations[0].form == KNOWN_MISC_POST_PROCESSORS.strip_background diff --git a/tests/worker/dispatch/ai_horde/test_image_parameters.py b/tests/worker/dispatch/ai_horde/test_image_parameters.py new file mode 100644 index 00000000..38bc6389 --- /dev/null +++ b/tests/worker/dispatch/ai_horde/test_image_parameters.py @@ -0,0 +1,455 @@ +from horde_model_reference.model_reference_manager import ModelReferenceManager + +from horde_sdk.ai_horde_api.apimodels import ( + ImageGenerateJobPopResponse, + ImageGenerateJobPopSkippedStatus, +) +from horde_sdk.generation_parameters.image import ( + BasicImageGenerationParameters, + ControlnetGenerationParameters, + HiresFixGenerationParameters, + Image2ImageGenerationParameters, + ImageGenerationParameters, + LoRaEntry, + RemixGenerationParameters, + TIEntry, +) +from horde_sdk.generation_parameters.image.consts import ( + KNOWN_IMAGE_SOURCE_PROCESSING, + LORA_TRIGGER_INJECT_CHOICE, + TI_TRIGGER_INJECT_CHOICE, +) +from horde_sdk.worker.consts import REQUESTED_SOURCE_IMAGE_FALLBACK_CHOICE +from horde_sdk.worker.dispatch.ai_horde.image.convert import convert_image_job_pop_response_to_parameters +from horde_sdk.worker.dispatch.ai_horde_parameters import AIHordeR2DispatchParameters + + +def assert_features_selected( + generation_parameters: ImageGenerationParameters, + *, + control_net: bool = False, + custom_workflow: bool = False, + hires_fix: bool = False, + img2img: bool = False, + remix: bool = False, + loras: bool = False, + tis: bool = False, +) -> None: + """Confirm that the selected features are present if intended, and not present if deselected.""" + additional_params = generation_parameters.additional_params + + if additional_params is None: + assert not (control_net or custom_workflow or hires_fix or img2img or remix or loras or tis) + return + + assert (additional_params.controlnet_params is not None) == control_net + assert bool(additional_params.custom_workflows_params) == custom_workflow + assert (additional_params.hires_fix_params is not None) == hires_fix + assert (additional_params.image2image_params is not None) == img2img + assert 
(additional_params.remix_params is not None) == remix + assert bool(additional_params.lora_params) == loras + assert bool(additional_params.ti_params) == tis + + +def assert_base_image_generation_parameters( + base_params: BasicImageGenerationParameters, + api_response: ImageGenerateJobPopResponse, +) -> None: + """Confirm all common image generation parameters are correctly mapped.""" + assert isinstance(base_params, BasicImageGenerationParameters) + assert base_params.model == api_response.model + assert base_params.prompt == api_response.payload.prompt + assert base_params.seed == api_response.payload.seed + + if not api_response.payload.hires_fix: + assert base_params.steps == api_response.payload.ddim_steps + + +def assert_image_generation_parameters( + generation_parameters: ImageGenerationParameters, + api_response: ImageGenerateJobPopResponse, + source_processing: str, +) -> None: + """Confirm all common image generation parameters are correctly mapped.""" + assert isinstance(generation_parameters, ImageGenerationParameters) + + assert isinstance(generation_parameters.base_params, BasicImageGenerationParameters) + assert_base_image_generation_parameters(generation_parameters.base_params, api_response) + + assert generation_parameters.batch_size == api_response.payload.n_iter + assert generation_parameters.source_processing == source_processing + + +def assert_image_dispatch_parameters( + dispatch_parameters: AIHordeR2DispatchParameters, + api_response: ImageGenerateJobPopResponse, +) -> None: + """Confirm all common image dispatch parameters are correctly mapped.""" + assert isinstance(dispatch_parameters, AIHordeR2DispatchParameters) + assert dispatch_parameters.generation_ids == api_response.ids + assert ( + dispatch_parameters.no_valid_request_found_reasons + == api_response.skipped + == ImageGenerateJobPopSkippedStatus() + ) + assert dispatch_parameters.source_image_fallback_choice == REQUESTED_SOURCE_IMAGE_FALLBACK_CHOICE.TXT2IMG_FALLBACK + + +def assert_common_image_parameters( + image_generation_parameters: ImageGenerationParameters, + dispatch_parameters: AIHordeR2DispatchParameters, + api_response: ImageGenerateJobPopResponse, +) -> None: + """Confirm all common image generation and dispatch parameters are correctly mapped.""" + source_processing = api_response.source_processing + assert_image_generation_parameters(image_generation_parameters, api_response, source_processing) + assert_image_dispatch_parameters(dispatch_parameters, api_response) + + +def test_convert_image_job_pop_response_to_parameters( + simple_image_gen_job_pop_response: ImageGenerateJobPopResponse, + model_reference_manager: ModelReferenceManager, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters.""" + generation_parameters, dispatch_parameters = convert_image_job_pop_response_to_parameters( + api_response=simple_image_gen_job_pop_response, + model_reference_manager=model_reference_manager, + ) + assert_common_image_parameters( + generation_parameters, + dispatch_parameters, + simple_image_gen_job_pop_response, + ) + assert_features_selected(generation_parameters) + + +def test_convert_image_job_pop_response_to_parameters_img2img( + simple_image_gen_job_pop_response_img2img: ImageGenerateJobPopResponse, + model_reference_manager: ModelReferenceManager, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters for img2img.""" + generation_parameters, dispatch_parameters = convert_image_job_pop_response_to_parameters( + 
api_response=simple_image_gen_job_pop_response_img2img, + model_reference_manager=model_reference_manager, + ) + assert_common_image_parameters( + generation_parameters, + dispatch_parameters, + simple_image_gen_job_pop_response_img2img, + ) + assert_features_selected(generation_parameters, img2img=True) + + additional_params = generation_parameters.additional_params + + assert isinstance(additional_params.image2image_params, Image2ImageGenerationParameters) + assert isinstance(additional_params.image2image_params.source_image, bytes) + assert additional_params.image2image_params.source_mask is None + + +def test_convert_image_job_pop_response_to_parameters_img2img_masked( + simple_image_gen_job_pop_response_img2img_masked: ImageGenerateJobPopResponse, + model_reference_manager: ModelReferenceManager, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters for img2img inpainting.""" + generation_parameters, dispatch_parameters = convert_image_job_pop_response_to_parameters( + api_response=simple_image_gen_job_pop_response_img2img_masked, + model_reference_manager=model_reference_manager, + ) + assert_common_image_parameters( + generation_parameters, + dispatch_parameters, + simple_image_gen_job_pop_response_img2img_masked, + ) + + assert_features_selected(generation_parameters, img2img=True) + + additional_params = generation_parameters.additional_params + + assert isinstance(additional_params.image2image_params, Image2ImageGenerationParameters) + assert isinstance(additional_params.image2image_params.source_image, bytes) + assert isinstance(additional_params.image2image_params.source_mask, bytes) + + +def test_convert_image_job_pop_response_to_parameters_inpainting( + simple_image_gen_job_pop_response_inpainting: ImageGenerateJobPopResponse, + model_reference_manager: ModelReferenceManager, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters for inpainting.""" + generation_parameters, dispatch_parameters = convert_image_job_pop_response_to_parameters( + api_response=simple_image_gen_job_pop_response_inpainting, + model_reference_manager=model_reference_manager, + ) + + assert_common_image_parameters( + generation_parameters, + dispatch_parameters, + simple_image_gen_job_pop_response_inpainting, + ) + + assert_features_selected(generation_parameters, img2img=True) + + additional_params = generation_parameters.additional_params + + assert isinstance(additional_params.image2image_params, Image2ImageGenerationParameters) + assert isinstance(additional_params.image2image_params.source_image, bytes) + assert isinstance(additional_params.image2image_params.source_mask, bytes) + + +def test_convert_image_job_pop_response_to_parameters_outpainting_alpha( + simple_image_gen_job_pop_response_outpainting_alpha: ImageGenerateJobPopResponse, + model_reference_manager: ModelReferenceManager, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters for outpainting with alpha.""" + generation_parameters, dispatch_parameters = convert_image_job_pop_response_to_parameters( + api_response=simple_image_gen_job_pop_response_outpainting_alpha, + model_reference_manager=model_reference_manager, + ) + + assert_common_image_parameters( + generation_parameters, + dispatch_parameters, + simple_image_gen_job_pop_response_outpainting_alpha, + ) + + assert generation_parameters.source_processing == KNOWN_IMAGE_SOURCE_PROCESSING.inpainting + + assert_features_selected(generation_parameters, img2img=True) + + 
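The img2img, masked img2img, inpainting, and outpainting tests in this file all exercise the same pattern: convert the popped response, then branch on `additional_params`. The following is a minimal standalone sketch of that pattern, not part of the diff; the popped response and reference manager are assumed to be supplied by the caller, and the helper name is illustrative only.

```python
# Hedged sketch: the conversion pattern these tests exercise, outside of pytest.
from horde_model_reference.model_reference_manager import ModelReferenceManager

from horde_sdk.ai_horde_api.apimodels import ImageGenerateJobPopResponse
from horde_sdk.worker.dispatch.ai_horde.image.convert import convert_image_job_pop_response_to_parameters


def describe_popped_image_job(
    popped_response: ImageGenerateJobPopResponse,
    manager: ModelReferenceManager,
) -> str:
    """Summarize what kind of work a popped image job is asking for."""
    generation_parameters, dispatch_parameters = convert_image_job_pop_response_to_parameters(
        api_response=popped_response,
        model_reference_manager=manager,
    )

    extras = generation_parameters.additional_params
    if extras is not None and extras.image2image_params is not None:
        # A source mask (inpainting/outpainting) arrives as bytes alongside the source image.
        mode = "img2img + mask" if extras.image2image_params.source_mask is not None else "img2img"
    else:
        mode = "txt2img"

    return f"model={generation_parameters.base_params.model} mode={mode} ids={dispatch_parameters.generation_ids}"
```

When `additional_params` is `None` the job is a plain txt2img request, which is what `assert_features_selected` above verifies.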
additional_params = generation_parameters.additional_params + + assert isinstance(additional_params.image2image_params, Image2ImageGenerationParameters) + assert isinstance(additional_params.image2image_params.source_image, bytes) + + +def test_convert_image_job_pop_response_to_parameters_controlnet_openpose( + simple_image_gen_job_pop_response_controlnet_openpose: ImageGenerateJobPopResponse, + model_reference_manager: ModelReferenceManager, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters for controlnet with openpose.""" + generation_parameters, dispatch_parameters = convert_image_job_pop_response_to_parameters( + api_response=simple_image_gen_job_pop_response_controlnet_openpose, + model_reference_manager=model_reference_manager, + ) + + assert_common_image_parameters( + generation_parameters, + dispatch_parameters, + simple_image_gen_job_pop_response_controlnet_openpose, + ) + + assert_features_selected(generation_parameters, control_net=True) + + additional_params = generation_parameters.additional_params + + assert isinstance(additional_params.controlnet_params, ControlnetGenerationParameters) + assert additional_params.controlnet_params.control_map is not None + assert isinstance(additional_params.controlnet_params.control_map, bytes) + assert additional_params.controlnet_params.source_image is None + + +def test_convert_image_job_pop_response_to_parameters_hires_fix( + simple_image_gen_job_pop_response_hires_fix: ImageGenerateJobPopResponse, + model_reference_manager: ModelReferenceManager, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters for hires fix.""" + generation_parameters, dispatch_parameters = convert_image_job_pop_response_to_parameters( + api_response=simple_image_gen_job_pop_response_hires_fix, + model_reference_manager=model_reference_manager, + ) + + assert_common_image_parameters( + generation_parameters, + dispatch_parameters, + simple_image_gen_job_pop_response_hires_fix, + ) + + assert_features_selected(generation_parameters, hires_fix=True) + + additional_params = generation_parameters.additional_params + + assert isinstance(additional_params.hires_fix_params, HiresFixGenerationParameters) + + assert additional_params.hires_fix_params.first_pass is not None + assert_base_image_generation_parameters( + base_params=additional_params.hires_fix_params.first_pass, + api_response=simple_image_gen_job_pop_response_hires_fix, + ) + + assert additional_params.hires_fix_params.second_pass is not None + assert_base_image_generation_parameters( + base_params=additional_params.hires_fix_params.second_pass, + api_response=simple_image_gen_job_pop_response_hires_fix, + ) + + +def test_convert_image_job_pop_response_to_parameters_hires_fix_denoise( + simple_image_gen_job_pop_response_hires_fix_denoise: ImageGenerateJobPopResponse, + model_reference_manager: ModelReferenceManager, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters for hires fix with denoise.""" + generation_parameters, dispatch_parameters = convert_image_job_pop_response_to_parameters( + api_response=simple_image_gen_job_pop_response_hires_fix_denoise, + model_reference_manager=model_reference_manager, + ) + + assert_common_image_parameters( + generation_parameters, + dispatch_parameters, + simple_image_gen_job_pop_response_hires_fix_denoise, + ) + + assert_features_selected(generation_parameters, hires_fix=True) + + additional_params = generation_parameters.additional_params + + assert 
isinstance(additional_params.hires_fix_params, HiresFixGenerationParameters) + + assert additional_params.hires_fix_params.first_pass is not None + assert_base_image_generation_parameters( + base_params=additional_params.hires_fix_params.first_pass, + api_response=simple_image_gen_job_pop_response_hires_fix_denoise, + ) + + assert additional_params.hires_fix_params.second_pass is not None + assert_base_image_generation_parameters( + base_params=additional_params.hires_fix_params.second_pass, + api_response=simple_image_gen_job_pop_response_hires_fix_denoise, + ) + assert ( + additional_params.hires_fix_params.second_pass.denoising_strength + == simple_image_gen_job_pop_response_hires_fix_denoise.payload.hires_fix_denoising_strength + ) + + +def assert_lora_parameters( + generation_parameters: ImageGenerationParameters, + api_response: ImageGenerateJobPopResponse, +) -> None: + """Confirm that LoRa parameters are correctly mapped.""" + additional_params = generation_parameters.additional_params + + assert isinstance(additional_params.lora_params, list) + assert len(additional_params.lora_params) > 0 + + assert api_response.payload.loras is not None + + assert len(additional_params.lora_params) == len(api_response.payload.loras) + + for i in range(len(additional_params.lora_params)): + assert isinstance(additional_params.lora_params[i], LoRaEntry) + assert additional_params.lora_params[i].lora_inject_trigger_choice == LORA_TRIGGER_INJECT_CHOICE.NO_INJECT + + if api_response.payload.loras[i].is_version: + assert additional_params.lora_params[i].remote_version_id is not None + assert additional_params.lora_params[i].remote_version_id == api_response.payload.loras[i].name + assert additional_params.lora_params[i].name is None + assert additional_params.lora_params[i].model_strength + else: + assert additional_params.lora_params[i].name is not None + assert additional_params.lora_params[i].name == api_response.payload.loras[i].name + assert additional_params.lora_params[i].remote_version_id is None + + +def test_convert_image_job_pop_response_to_parameters_loras( + simple_image_gen_job_pop_response_loras: ImageGenerateJobPopResponse, + model_reference_manager: ModelReferenceManager, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters for LoRAS.""" + generation_parameters, dispatch_parameters = convert_image_job_pop_response_to_parameters( + api_response=simple_image_gen_job_pop_response_loras, + model_reference_manager=model_reference_manager, + ) + + assert_common_image_parameters( + generation_parameters, + dispatch_parameters, + simple_image_gen_job_pop_response_loras, + ) + + assert_features_selected(generation_parameters, loras=True) + + additional_params = generation_parameters.additional_params + + assert isinstance(additional_params.lora_params, list) + assert len(additional_params.lora_params) == 1 + + assert simple_image_gen_job_pop_response_loras.payload.loras is not None + + assert_lora_parameters(generation_parameters, simple_image_gen_job_pop_response_loras) + + +def assert_ti_parameters( + generation_parameters: ImageGenerationParameters, + api_response: ImageGenerateJobPopResponse, +) -> None: + """Confirm that TI parameters are correctly mapped.""" + additional_params = generation_parameters.additional_params + + assert isinstance(additional_params.ti_params, list) + assert len(additional_params.ti_params) > 0 + + assert api_response.payload.tis is not None + + assert len(additional_params.ti_params) == len(api_response.payload.tis) + + for 
i in range(len(additional_params.ti_params)): + assert isinstance(additional_params.ti_params[i], TIEntry) + + assert additional_params.ti_params[i].name == api_response.payload.tis[i].name + + +def test_convert_image_job_pop_response_to_parameters_tis( + simple_image_gen_job_pop_response_tis: ImageGenerateJobPopResponse, + model_reference_manager: ModelReferenceManager, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters for TIs.""" + generation_parameters, dispatch_parameters = convert_image_job_pop_response_to_parameters( + api_response=simple_image_gen_job_pop_response_tis, + model_reference_manager=model_reference_manager, + ) + + assert_common_image_parameters( + generation_parameters, + dispatch_parameters, + simple_image_gen_job_pop_response_tis, + ) + + assert_features_selected(generation_parameters, tis=True) + + additional_params = generation_parameters.additional_params + + assert isinstance(additional_params.ti_params, list) + assert len(additional_params.ti_params) == 1 + + assert simple_image_gen_job_pop_response_tis.payload.tis is not None + + assert isinstance(additional_params.ti_params[0], TIEntry) + + for i in range(len(additional_params.ti_params)): + assert isinstance(additional_params.ti_params[i], TIEntry) + + assert additional_params.ti_params[i].ti_inject_trigger_choice == TI_TRIGGER_INJECT_CHOICE.NEGATIVE_PROMPT + assert additional_params.ti_params[i].name == simple_image_gen_job_pop_response_tis.payload.tis[i].name + + +def test_convert_image_job_pop_response_to_parameters_remix( + simple_image_gen_job_pop_response_remix: ImageGenerateJobPopResponse, + model_reference_manager: ModelReferenceManager, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters for remix.""" + generation_parameters, dispatch_parameters = convert_image_job_pop_response_to_parameters( + api_response=simple_image_gen_job_pop_response_remix, + model_reference_manager=model_reference_manager, + ) + + assert_common_image_parameters( + generation_parameters, + dispatch_parameters, + simple_image_gen_job_pop_response_remix, + ) + + assert_features_selected(generation_parameters, remix=True) + + additional_params = generation_parameters.additional_params + + assert isinstance(additional_params.remix_params, RemixGenerationParameters) diff --git a/tests/worker/dispatch/ai_horde/test_text_parameters.py b/tests/worker/dispatch/ai_horde/test_text_parameters.py new file mode 100644 index 00000000..83ab5c49 --- /dev/null +++ b/tests/worker/dispatch/ai_horde/test_text_parameters.py @@ -0,0 +1,76 @@ +from horde_sdk.ai_horde_api.apimodels.generate.text.pop import TextGenerateJobPopResponse +from horde_sdk.consts import KNOWN_DISPATCH_SOURCE, KNOWN_INFERENCE_BACKEND +from horde_sdk.worker.consts import ( + REQUESTED_BACKEND_CONSTRAINTS, +) +from horde_sdk.worker.dispatch.ai_horde.text.convert import ( + convert_text_job_pop_response_to_parameters, +) + + +def test_convert_text_job_pop_response_to_parameters( + simple_text_gen_job_pop_response: TextGenerateJobPopResponse, +) -> None: + """Confirm that API responses are correctly mapped to generation parameters.""" + generation_parameters, dispatch_parameters = convert_text_job_pop_response_to_parameters( + simple_text_gen_job_pop_response, + ) + + # Dispatch parameters. 
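The field-by-field assertions that follow walk through both halves of the returned tuple: dispatch metadata first, then the base sampling parameters and the formatting flags. As a standalone sketch of the same conversion outside pytest (the popped response is assumed to come from a real text job pop, and the function name is illustrative only):

```python
# Hedged sketch: summarizing a popped text job via the converter under test.
from horde_sdk.ai_horde_api.apimodels.generate.text.pop import TextGenerateJobPopResponse
from horde_sdk.worker.dispatch.ai_horde.text.convert import convert_text_job_pop_response_to_parameters


def summarize_popped_text_job(popped_response: TextGenerateJobPopResponse) -> str:
    """Report the model, token budgets, and dispatch metadata of a popped text job."""
    generation_parameters, dispatch_parameters = convert_text_job_pop_response_to_parameters(popped_response)

    base = generation_parameters.base_params
    return (
        f"model={base.model} max_length={base.max_length} "
        f"max_context_length={base.max_context_length} "
        f"ids={dispatch_parameters.generation_ids} ttl={dispatch_parameters.ttl}"
    )
```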
+ assert dispatch_parameters.generation_ids == simple_text_gen_job_pop_response.ids + assert dispatch_parameters.dispatch_source == KNOWN_DISPATCH_SOURCE.AI_HORDE_API_OFFICIAL + assert dispatch_parameters.ttl == simple_text_gen_job_pop_response.ttl + assert dispatch_parameters.inference_backend == KNOWN_INFERENCE_BACKEND.IN_MODEL_NAME + assert dispatch_parameters.requested_backend_constraints == REQUESTED_BACKEND_CONSTRAINTS.ANY + assert dispatch_parameters.no_valid_request_found_reasons == simple_text_gen_job_pop_response.skipped + + # Base generation parameters. + assert generation_parameters.result_ids == simple_text_gen_job_pop_response.ids + + assert generation_parameters.base_params.model == simple_text_gen_job_pop_response.model + assert generation_parameters.base_params.prompt == simple_text_gen_job_pop_response.payload.prompt + assert generation_parameters.base_params.soft_prompt == simple_text_gen_job_pop_response.softprompt + assert ( + generation_parameters.base_params.max_context_length + == simple_text_gen_job_pop_response.payload.max_context_length + ) + assert generation_parameters.base_params.max_length == simple_text_gen_job_pop_response.payload.max_length + assert generation_parameters.base_params.stop_sequence == simple_text_gen_job_pop_response.payload.stop_sequence + assert generation_parameters.base_params.temperature == simple_text_gen_job_pop_response.payload.temperature + assert ( + generation_parameters.base_params.dynamic_temp_exponent + == simple_text_gen_job_pop_response.payload.dynatemp_exponent + ) + assert ( + generation_parameters.base_params.dynamic_temp_range == simple_text_gen_job_pop_response.payload.dynatemp_range + ) + assert generation_parameters.base_params.tfs == simple_text_gen_job_pop_response.payload.tfs + assert generation_parameters.base_params.typical == simple_text_gen_job_pop_response.payload.typical + assert generation_parameters.base_params.sampler_order == simple_text_gen_job_pop_response.payload.sampler_order + assert ( + generation_parameters.base_params.smoothing_factor == simple_text_gen_job_pop_response.payload.smoothing_factor + ) + assert generation_parameters.base_params.top_a == simple_text_gen_job_pop_response.payload.top_a + assert generation_parameters.base_params.top_k == simple_text_gen_job_pop_response.payload.top_k + assert generation_parameters.base_params.top_p == simple_text_gen_job_pop_response.payload.top_p + assert generation_parameters.base_params.min_p == simple_text_gen_job_pop_response.payload.min_p + assert generation_parameters.base_params.rep_pen == simple_text_gen_job_pop_response.payload.rep_pen + assert generation_parameters.base_params.rep_pen_range == simple_text_gen_job_pop_response.payload.rep_pen_range + assert generation_parameters.base_params.rep_pen_slope == simple_text_gen_job_pop_response.payload.rep_pen_slope + + # Format parameters. + format_params = generation_parameters.format_params + + assert format_params is not None + + assert format_params.frmtadsnsp == simple_text_gen_job_pop_response.payload.frmtadsnsp + assert format_params.frmtrmblln == simple_text_gen_job_pop_response.payload.frmtrmblln + assert format_params.frmtrmspch == simple_text_gen_job_pop_response.payload.frmtrmspch + assert format_params.frmttriminc == simple_text_gen_job_pop_response.payload.frmttriminc + assert format_params.singleline == simple_text_gen_job_pop_response.payload.singleline + + # KoboldAI-specific parameters. 
+ assert ( + generation_parameters.use_default_bad_words_ids + == simple_text_gen_job_pop_response.payload.use_default_badwordsids + ) diff --git a/tests/worker/test_consts.py b/tests/worker/test_consts.py new file mode 100644 index 00000000..b4cb4022 --- /dev/null +++ b/tests/worker/test_consts.py @@ -0,0 +1,8 @@ +from horde_sdk.worker.consts import ( + GENERATION_PROGRESS, + initial_generation_state, +) + + +def test_initial_generation_state() -> None: + assert initial_generation_state == GENERATION_PROGRESS.NOT_STARTED, "Initial state should be NOT_STARTED" diff --git a/tests/worker/test_horde_single_generations.py b/tests/worker/test_horde_single_generations.py new file mode 100644 index 00000000..0ccc54ed --- /dev/null +++ b/tests/worker/test_horde_single_generations.py @@ -0,0 +1,2274 @@ +from collections.abc import Callable +from typing import Any +from uuid import UUID + +import pytest +import yaml +from loguru import logger + +from horde_sdk.generation_parameters.alchemy import ( + AlchemyParameters, + SingleAlchemyParameters, +) +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_FORMS +from horde_sdk.generation_parameters.alchemy.object_models import ( + UpscaleAlchemyParametersTemplate, +) +from horde_sdk.generation_parameters.image import ImageGenerationParameters +from horde_sdk.generation_parameters.image.object_models import ( + BasicImageGenerationParametersTemplate, + ImageGenerationParametersTemplate, +) +from horde_sdk.generation_parameters.text import TextGenerationParameters +from horde_sdk.generation_parameters.text.object_models import ( + BasicTextGenerationParametersTemplate, + TextGenerationParametersTemplate, +) +from horde_sdk.generation_parameters.utils import ResultIdAllocator +from horde_sdk.safety import ( + ImageSafetyResult, + SafetyResult, + SafetyRules, + default_image_safety_rules, +) +from horde_sdk.worker.consts import ( + GENERATION_PROGRESS, + base_generate_progress_transitions, + black_box_generate_progress_transitions, +) +from horde_sdk.worker.generations import ( + AlchemySingleGeneration, + ImageSingleGeneration, + TextSingleGeneration, +) +from horde_sdk.worker.generations_base import HordeSingleGeneration + + +def test_write_progress_transitions() -> None: + """Write the progress transitions to the docs folder.""" + + # Convert enum dictionaries to string dictionaries before serializing to YAML + def convert_enum_dict_to_string_dict( + enum_dict: dict[GENERATION_PROGRESS, list[GENERATION_PROGRESS]], + ) -> dict[str, list[str]]: + string_dict = {} + for key, values in enum_dict.items(): + string_key = key.name # Get the name of the enum + string_values = [value.name for value in values] # Convert enum values to strings + string_dict[string_key] = string_values + return string_dict + + transitions_to_write = [ + (convert_enum_dict_to_string_dict(base_generate_progress_transitions), "base_transitions.yaml"), + (convert_enum_dict_to_string_dict(black_box_generate_progress_transitions), "black_box_transitions.yaml"), + ] + + output_folder = "docs/worker" + + for transitions in transitions_to_write: + transitions_dict, filename = transitions + output_path = f"{output_folder}/{filename}" + + with open(output_path, "w") as file: + yaml.dump(transitions_dict, file, default_flow_style=False, sort_keys=False) + + logger.info(f"Wrote progress transitions to {output_path}") + + +class GenerationPermutation: + """A permutation of possible generation configurations. 
+ + For example, text generation may not require post-processing or safety checks, while image generation may require + both. For testing, we can create permutations of these configurations to ensure that the generation process works + as expected across all possible configurations. + + """ + + def __init__( + self, + *, + include_preloading: bool, + include_generation: bool, + include_post_processing: bool, + include_safety_check: bool, + include_submit: bool = False, + underlying_payload: ImageGenerationParameters | TextGenerationParameters | SingleAlchemyParameters, + ) -> None: + """Initialize the permutation. + + Args: + include_preloading (bool): Whether to include preloading in the generation process. + include_generation (bool): Whether to include generation in the generation process. + include_post_processing (bool): Whether to include post-processing in the generation process. + include_safety_check (bool): Whether to include a safety check in the generation process. + include_submit (bool): Whether to include submission in the generation process. + underlying_payload (ImageGenerationParameters | TextGenerationParameters | SingleAlchemyParameters): The + underlying payload for the generation process. + """ + self.include_preloading = include_preloading + self.include_generation = include_generation + self.include_post_processing = include_post_processing + self.include_safety_check = include_safety_check + self.include_submit = include_submit + self.underlying_payload = underlying_payload + + +def create_permutations( + payload: ImageGenerationParameters | TextGenerationParameters | SingleAlchemyParameters, + *, + required_preloading: bool = False, + required_post_processing: bool = False, + required_generation: bool = False, + required_safety_check: bool = False, + required_submit: bool = False, +) -> list[GenerationPermutation]: + permutations = [] + + for include_generation in [True] if required_generation else [False, True]: + for include_post_processing in [True] if required_post_processing else [False, True]: + if not (include_generation or include_post_processing): + continue + + for include_preloading in [True] if required_preloading else [False, True]: + for include_safety_check in [True] if required_safety_check else [False, True]: + if not (include_safety_check or include_generation or include_post_processing): + continue + + for include_submit in [True] if required_submit else [False, True]: + permutations.append( + GenerationPermutation( + include_preloading=include_preloading, + include_generation=include_generation, + include_post_processing=include_post_processing, + include_safety_check=include_safety_check, + include_submit=include_submit, + underlying_payload=payload, + ), + ) + + return permutations + + +@pytest.fixture(scope="function") +def text_permutations( + simple_text_generation_parameters: TextGenerationParameters, +) -> list[GenerationPermutation]: + """Return the supported configurations for a `TextSingleGeneration` object.""" + return create_permutations( + simple_text_generation_parameters, + required_generation=True, + ) + + +@pytest.fixture(scope="function") +def image_permutations( + simple_image_generation_parameters: ImageGenerationParameters, + simple_image_generation_parameters_post_processing: ImageGenerationParameters, +) -> list[GenerationPermutation]: + """Return the supported configurations for a `ImageSingleGeneration` object.""" + return create_permutations( + simple_image_generation_parameters, + required_generation=True, + 
required_safety_check=True, + ) + create_permutations( + simple_image_generation_parameters_post_processing, + required_generation=True, + required_post_processing=True, + required_safety_check=True, + ) + + +@pytest.fixture(scope="function") +def alchemy_permutations( + simple_alchemy_generation_parameters: AlchemyParameters, + simple_alchemy_generation_parameters_nsfw_detect: AlchemyParameters, +) -> list[GenerationPermutation]: + """Return the supported configurations for a `AlchemySingleGeneration` object.""" + assert len(simple_alchemy_generation_parameters.all_alchemy_operations) == 1 + assert len(simple_alchemy_generation_parameters_nsfw_detect.all_alchemy_operations) == 1 + return create_permutations( + simple_alchemy_generation_parameters.all_alchemy_operations[0], + ) + create_permutations( + simple_alchemy_generation_parameters_nsfw_detect.all_alchemy_operations[0], + ) + + +def test_image_single_generation_from_template_applies_updates() -> None: + template = ImageGenerationParametersTemplate( + base_params=BasicImageGenerationParametersTemplate(prompt="placeholder"), + ) + generation = ImageSingleGeneration.from_template( + template, + base_param_updates=BasicImageGenerationParametersTemplate(model="test-model", prompt="updated"), + result_ids=("result-1",), + ) + + assert generation.generation_parameters.base_params.prompt == "updated" + assert generation.generation_parameters.result_ids == ["result-1"] + + +def test_image_single_generation_allocator_is_deterministic() -> None: + template = ImageGenerationParametersTemplate( + base_params=BasicImageGenerationParametersTemplate(prompt="allocator prompt", model="image-model"), + ) + template.batch_size = 2 + + allocator = ResultIdAllocator() + first = ImageSingleGeneration.from_template(template, allocator=allocator, seed="image-seed") + second = ImageSingleGeneration.from_template(template, allocator=allocator, seed="image-seed") + + assert first.generation_parameters.result_ids == second.generation_parameters.result_ids + + variant_template = ImageGenerationParametersTemplate( + base_params=BasicImageGenerationParametersTemplate(prompt="allocator prompt variant", model="image-model"), + ) + variant_template.batch_size = 2 + third = ImageSingleGeneration.from_template(variant_template, allocator=allocator, seed="image-seed") + + assert first.generation_parameters.result_ids != third.generation_parameters.result_ids + + +def test_text_single_generation_from_template_allocates_result_id() -> None: + template = TextGenerationParametersTemplate( + base_params=BasicTextGenerationParametersTemplate( + prompt="base", + model="test-model", + ), + ) + + generation = TextSingleGeneration.from_template( + template, + base_param_updates=BasicTextGenerationParametersTemplate(prompt="final"), + ) + + assert generation.generation_parameters.base_params.prompt == "final" + assert generation.generation_parameters.result_ids is not None + assert len(generation.generation_parameters.result_ids) == 1 + + +def test_text_single_generation_allocator_is_deterministic() -> None: + template = TextGenerationParametersTemplate( + base_params=BasicTextGenerationParametersTemplate( + prompt="allocator", + model="allocator-model", + ), + ) + + allocator = ResultIdAllocator() + first = TextSingleGeneration.from_template(template, allocator=allocator, seed="text-seed") + second = TextSingleGeneration.from_template(template, allocator=allocator, seed="text-seed") + + assert first.generation_parameters.result_ids == second.generation_parameters.result_ids + + 
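Taken together, these allocator tests pin down a simple contract: an identical template, allocator, and seed yields identical result IDs, while changing the template changes them. Below is a minimal sketch of that contract with placeholder prompt and model strings (the test itself resumes afterwards with a modified template to cover the second half).

```python
# Hedged sketch: deterministic result-ID allocation, mirroring the contract tested here.
from horde_sdk.generation_parameters.text.object_models import (
    BasicTextGenerationParametersTemplate,
    TextGenerationParametersTemplate,
)
from horde_sdk.generation_parameters.utils import ResultIdAllocator
from horde_sdk.worker.generations import TextSingleGeneration

template = TextGenerationParametersTemplate(
    base_params=BasicTextGenerationParametersTemplate(prompt="placeholder prompt", model="placeholder-model"),
)

allocator = ResultIdAllocator()

# The same template, allocator, and seed should always produce the same result IDs.
first = TextSingleGeneration.from_template(template, allocator=allocator, seed="example-seed")
second = TextSingleGeneration.from_template(template, allocator=allocator, seed="example-seed")
assert first.generation_parameters.result_ids == second.generation_parameters.result_ids
```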
modified_template = TextGenerationParametersTemplate( + base_params=BasicTextGenerationParametersTemplate( + prompt="allocator-2", + model="allocator-model", + ), + ) + third = TextSingleGeneration.from_template(modified_template, allocator=allocator, seed="text-seed") + + assert first.generation_parameters.result_ids != third.generation_parameters.result_ids + + +def test_alchemy_single_generation_from_template_sets_source_image() -> None: + template = UpscaleAlchemyParametersTemplate() + generation = AlchemySingleGeneration.from_template( + template, + source_image=b"image-bytes", + default_form=KNOWN_ALCHEMY_FORMS.post_process, + ) + + assert generation.generation_parameters.source_image == b"image-bytes" + assert generation.generation_parameters.form == KNOWN_ALCHEMY_FORMS.post_process + assert generation.generation_parameters.result_id is not None + + +class TestHordeSingleGeneration: + """Test the `HordeSingleGeneration` class.""" + + _shared_image: bytes + + @pytest.fixture(autouse=True) + def setup(self, default_testing_image_bytes: bytes) -> None: + self._shared_image = default_testing_image_bytes + + def test_image_generation_results_preserve_dispatch_identifiers( + self, + simple_image_generation_parameters: ImageGenerationParameters, + ) -> None: + dispatch_ids = ["dispatch-image-1"] + generation = ImageSingleGeneration( + generation_parameters=simple_image_generation_parameters, + dispatch_result_ids=dispatch_ids, + ) + + generation.on_generating() + generation.on_generation_work_complete(result=self._shared_image) + + results_snapshot = generation.generation_results + assert list(results_snapshot.keys()) == generation.result_ids + + result_id, payload = results_snapshot.popitem() + assert payload == self._shared_image + assert result_id == generation.result_ids[-1] + + assert generation.dispatch_result_ids == dispatch_ids + mutated_ids = generation.dispatch_result_ids + if mutated_ids is not None: + mutated_ids.append("mutated") + assert generation.dispatch_result_ids == dispatch_ids + assert len(generation.generation_results) == 1 + + @pytest.fixture(scope="function") + def id_and_image_generation( + self, + single_id_str: str, + simple_image_generation_parameters: ImageGenerationParameters, + ) -> tuple[str, ImageSingleGeneration]: + generation = ImageSingleGeneration( + generation_id=single_id_str, + generation_parameters=simple_image_generation_parameters, + ) + return single_id_str, generation + + def test_text_generation_results_preserve_dispatch_identifiers( + self, + simple_text_generation_parameters: TextGenerationParameters, + ) -> None: + dispatch_ids = ["dispatch-text-1"] + generation = TextSingleGeneration( + generation_parameters=simple_text_generation_parameters, + dispatch_result_ids=dispatch_ids, + ) + + generation.on_generating() + generation.on_generation_work_complete(result="generated text") + + results_snapshot = generation.generation_results + assert list(results_snapshot.keys()) == generation.result_ids + + result_id, payload = results_snapshot.popitem() + assert payload == "generated text" + assert result_id == generation.result_ids[-1] + + assert generation.dispatch_result_ids == dispatch_ids + mutated_ids = generation.dispatch_result_ids + if mutated_ids is not None: + mutated_ids.append("mutated") + assert generation.dispatch_result_ids == dispatch_ids + assert len(generation.generation_results) == 1 + + @pytest.fixture(scope="function") + def id_and_text_generation( + self, + single_id_str: str, + simple_text_generation_parameters: 
TextGenerationParameters, + ) -> tuple[str, TextSingleGeneration]: + generation = TextSingleGeneration( + generation_id=single_id_str, + generation_parameters=simple_text_generation_parameters, + ) + return single_id_str, generation + + @pytest.fixture(scope="function") + def id_and_alchemy_generation( + self, + single_id_str: str, + simple_alchemy_generation_parameters: AlchemyParameters, + ) -> tuple[str, AlchemySingleGeneration]: + assert len(simple_alchemy_generation_parameters.all_alchemy_operations) == 1 + generation = AlchemySingleGeneration( + generation_id=single_id_str, + generation_parameters=simple_alchemy_generation_parameters.all_alchemy_operations[0], + ) + return single_id_str, generation + + def test_alchemy_generation_results_preserve_dispatch_identifiers( + self, + simple_alchemy_generation_parameters: AlchemyParameters, + ) -> None: + assert len(simple_alchemy_generation_parameters.all_alchemy_operations) == 1 + operation_parameters = simple_alchemy_generation_parameters.all_alchemy_operations[0] + + dispatch_ids = ["dispatch-alchemy-1"] + generation = AlchemySingleGeneration( + generation_parameters=operation_parameters, + dispatch_result_ids=dispatch_ids, + ) + + generation.on_post_processing() + generation.set_work_result(self._shared_image) + generation.on_post_processing_complete() + + results_snapshot = generation.generation_results + assert list(results_snapshot.keys()) == generation.result_ids + + result_id, payload = results_snapshot.popitem() + assert payload == self._shared_image + assert result_id == generation.result_ids[-1] + + assert generation.dispatch_result_ids == dispatch_ids + mutated_ids = generation.dispatch_result_ids + if mutated_ids is not None: + mutated_ids.append("mutated") + assert generation.dispatch_result_ids == dispatch_ids + assert len(generation.generation_results) == 1 + + def test_none_generation_init( + self, + ) -> None: + """Test that an exception is raised when a generation is initialized with a `None` ID.""" + + with pytest.raises(TypeError): + ImageSingleGeneration(generation_id=None) # type: ignore + + def test_black_box_mode_no_submit( + self, + simple_image_generation_parameters: ImageGenerationParameters, + ) -> None: + """Test that a generation can be initialized in black box mode.""" + from horde_sdk.worker.consts import GENERATION_PROGRESS + from horde_sdk.worker.generations import ImageSingleGeneration + + generation_id = str(UUID("00000000-0000-0000-0000-000000000000")) + generation = ImageSingleGeneration( + generation_id=generation_id, + generation_parameters=simple_image_generation_parameters, + black_box_mode=True, + requires_submit=False, + ) + + assert generation.get_generation_progress() == GENERATION_PROGRESS.NOT_STARTED + assert generation.black_box_mode is True + + generation.on_generating() + assert generation.get_generation_progress() == GENERATION_PROGRESS.GENERATING + + generation.on_generation_work_complete() + generation.set_work_result(self._shared_image) + generation.on_complete() + assert generation.get_generation_progress() == GENERATION_PROGRESS.COMPLETE + + def test_black_box_mode_with_submit( + self, + simple_image_generation_parameters: ImageGenerationParameters, + ) -> None: + """Test that a generation can be initialized in black box mode with submission.""" + from horde_sdk.worker.consts import GENERATION_PROGRESS + from horde_sdk.worker.generations import ImageSingleGeneration + + generation_id = str(UUID("00000000-0000-0000-0000-000000000000")) + generation = ImageSingleGeneration( + 
generation_id=generation_id, + generation_parameters=simple_image_generation_parameters, + black_box_mode=True, + requires_submit=True, + ) + + assert generation.get_generation_progress() == GENERATION_PROGRESS.NOT_STARTED + assert generation.black_box_mode is True + + generation.on_generating() + assert generation.get_generation_progress() == GENERATION_PROGRESS.GENERATING + + generation.on_generation_work_complete() + generation.set_work_result(self._shared_image) + generation.on_pending_submit() + assert generation.get_generation_progress() == GENERATION_PROGRESS.PENDING_SUBMIT + + generation.on_submitting() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMITTING + + generation.on_submit_complete() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMIT_COMPLETE + + def test_black_box_mode_with_submit_and_safety_check( + self, + simple_image_generation_parameters: ImageGenerationParameters, + ) -> None: + """Test that a generation can be initialized in black box mode with submission.""" + from horde_sdk.worker.consts import GENERATION_PROGRESS + from horde_sdk.worker.generations import ImageSingleGeneration + + generation_id = str(UUID("00000000-0000-0000-0000-000000000000")) + generation = ImageSingleGeneration( + generation_id=generation_id, + generation_parameters=simple_image_generation_parameters, + black_box_mode=True, + requires_submit=True, + ) + + assert generation.get_generation_progress() == GENERATION_PROGRESS.NOT_STARTED + assert generation.black_box_mode is True + + generation.on_generating() + assert generation.get_generation_progress() == GENERATION_PROGRESS.GENERATING + + generation.on_generation_work_complete() + generation.set_work_result(self._shared_image) + generation.on_pending_safety_check() + assert generation.get_generation_progress() == GENERATION_PROGRESS.PENDING_SAFETY_CHECK + + generation.on_safety_checking() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SAFETY_CHECKING + + generation.on_safety_check_complete( + batch_index=0, + safety_result=ImageSafetyResult( + is_nsfw=False, + is_csam=False, + ), + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.PENDING_SUBMIT + + generation.on_submitting() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMITTING + + generation.on_submit_complete() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMIT_COMPLETE + + def test_alchemy_safety_only( + self, + simple_alchemy_generation_parameters_nsfw_detect: AlchemyParameters, + ) -> None: + assert len(simple_alchemy_generation_parameters_nsfw_detect.all_alchemy_operations) == 1 + assert simple_alchemy_generation_parameters_nsfw_detect.nsfw_detectors is not None + assert len(simple_alchemy_generation_parameters_nsfw_detect.nsfw_detectors) == 1 + + nsfw_parameters = simple_alchemy_generation_parameters_nsfw_detect.nsfw_detectors[0] + + generation = AlchemySingleGeneration( + generation_parameters=nsfw_parameters, + requires_post_processing=False, + requires_submit=False, + ) + + generation.on_preloading() + + def _run_censor_test( + self, + *, + generation_parameters: ImageGenerationParameters | SingleAlchemyParameters, + generation_type: type[HordeSingleGeneration[Any]], + result_id: UUID, + is_nsfw: bool, + is_csam: bool, + expect_censored: bool, + safety_rules: SafetyRules, + ) -> None: + from horde_sdk.worker.consts import GENERATION_PROGRESS + + generation_id = str(UUID("00000000-0000-0000-0000-000000000000")) + + generation: 
HordeSingleGeneration[Any] = generation_type( + generation_id=generation_id, + generation_parameters=generation_parameters, + safety_rules=safety_rules, + result_ids=[result_id], + black_box_mode=True, + ) + + assert generation.get_generation_progress() == GENERATION_PROGRESS.NOT_STARTED + assert generation.black_box_mode is True + + generation.on_generating() + assert generation.get_generation_progress() == GENERATION_PROGRESS.GENERATING + + generation.on_generation_work_complete() + generation.set_work_result(self._shared_image) + + generation.on_pending_safety_check() + assert generation.get_generation_progress() == GENERATION_PROGRESS.PENDING_SAFETY_CHECK + + generation.on_safety_checking() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SAFETY_CHECKING + + generation.on_safety_check_complete( + batch_index=0, + safety_result=ImageSafetyResult( + is_nsfw=is_nsfw, + is_csam=is_csam, + ), + ) + + safety_check_results = generation.get_safety_check_results() + assert safety_check_results is not None + assert len(safety_check_results) == 1 + assert safety_check_results[0] is not None + assert safety_check_results[0].is_nsfw == is_nsfw + assert safety_check_results[0].is_csam == is_csam + + assert result_id in generation.generation_results + if expect_censored or is_csam: + assert generation.generation_results[result_id] is None + else: + assert generation.generation_results[result_id] is not None + + def test_safety_censored_results( + self, + simple_image_generation_parameters: ImageGenerationParameters, + simple_alchemy_generation_parameters_nsfw_detect: AlchemyParameters, + ) -> None: + """Test that a generation can be censored based on safety results.""" + self._run_censor_test( + generation_parameters=simple_image_generation_parameters, + generation_type=ImageSingleGeneration, + result_id=UUID("00000000-0000-0000-9999-000000000000"), + is_nsfw=True, + is_csam=False, + expect_censored=True, + safety_rules=default_image_safety_rules, + ) + + assert simple_alchemy_generation_parameters_nsfw_detect.nsfw_detectors is not None + self._run_censor_test( + generation_parameters=simple_alchemy_generation_parameters_nsfw_detect.nsfw_detectors[0], + generation_type=AlchemySingleGeneration, + result_id=UUID("00000000-0000-0000-8888-000000000000"), + is_nsfw=True, + is_csam=False, + expect_censored=True, + safety_rules=default_image_safety_rules, + ) + + def test_safety_censoring_not_expected( + self, + simple_image_generation_parameters: ImageGenerationParameters, + simple_alchemy_generation_parameters_nsfw_detect: AlchemyParameters, + ) -> None: + """Test that a generation is not censored when it shouldn't be.""" + self._run_censor_test( + generation_parameters=simple_image_generation_parameters, + generation_type=ImageSingleGeneration, + result_id=UUID("00000000-0000-0000-9999-000000000001"), + is_nsfw=False, + is_csam=False, + expect_censored=False, + safety_rules=default_image_safety_rules, + ) + + assert simple_alchemy_generation_parameters_nsfw_detect.nsfw_detectors is not None + self._run_censor_test( + generation_parameters=simple_alchemy_generation_parameters_nsfw_detect.nsfw_detectors[0], + generation_type=AlchemySingleGeneration, + result_id=UUID("00000000-0000-0000-8888-000000000001"), + is_nsfw=False, + is_csam=False, + expect_censored=False, + safety_rules=default_image_safety_rules, + ) + + def test_safety_censored_csam( + self, + simple_image_generation_parameters: ImageGenerationParameters, + simple_alchemy_generation_parameters_nsfw_detect: AlchemyParameters, 
+ ) -> None: + """Test that a generation is censored when CSAM is detected.""" + self._run_censor_test( + generation_parameters=simple_image_generation_parameters, + generation_type=ImageSingleGeneration, + result_id=UUID("00000000-0000-0000-9999-000000000002"), + is_nsfw=False, + is_csam=True, + expect_censored=True, + safety_rules=default_image_safety_rules, + ) + + assert simple_alchemy_generation_parameters_nsfw_detect.nsfw_detectors is not None + self._run_censor_test( + generation_parameters=simple_alchemy_generation_parameters_nsfw_detect.nsfw_detectors[0], + generation_type=AlchemySingleGeneration, + result_id=UUID("00000000-0000-0000-8888-000000000002"), + is_nsfw=False, + is_csam=True, + expect_censored=True, + safety_rules=default_image_safety_rules, + ) + + def test_safety_censored_both( + self, + simple_image_generation_parameters: ImageGenerationParameters, + simple_alchemy_generation_parameters_nsfw_detect: AlchemyParameters, + ) -> None: + """Test that a generation is censored when both NSFW and CSAM are detected.""" + self._run_censor_test( + generation_parameters=simple_image_generation_parameters, + generation_type=ImageSingleGeneration, + result_id=UUID("00000000-0000-0000-9999-000000000003"), + is_nsfw=True, + is_csam=True, + expect_censored=True, + safety_rules=default_image_safety_rules, + ) + + assert simple_alchemy_generation_parameters_nsfw_detect.nsfw_detectors is not None + self._run_censor_test( + generation_parameters=simple_alchemy_generation_parameters_nsfw_detect.nsfw_detectors[0], + generation_type=AlchemySingleGeneration, + result_id=UUID("00000000-0000-0000-8888-000000000003"), + is_nsfw=True, + is_csam=True, + expect_censored=True, + safety_rules=default_image_safety_rules, + ) + + def test_safety_uncensored_rules( + self, + simple_image_generation_parameters: ImageGenerationParameters, + simple_alchemy_generation_parameters_nsfw_detect: AlchemyParameters, + ) -> None: + """Test that a generation is not censored when the safety rules are set to not censor.""" + self._run_censor_test( + generation_parameters=simple_image_generation_parameters, + generation_type=ImageSingleGeneration, + result_id=UUID("00000000-0000-0000-9999-000000000004"), + is_nsfw=True, + is_csam=False, + expect_censored=False, + safety_rules=SafetyRules( + should_censor_nsfw=False, + should_censor_hate_speech=False, + should_censor_violent=False, + should_censor_self_harm=False, + ), + ) + + assert simple_alchemy_generation_parameters_nsfw_detect.nsfw_detectors is not None + self._run_censor_test( + generation_parameters=simple_alchemy_generation_parameters_nsfw_detect.nsfw_detectors[0], + generation_type=AlchemySingleGeneration, + result_id=UUID("00000000-0000-0000-8888-000000000004"), + is_nsfw=True, + is_csam=False, + expect_censored=False, + safety_rules=SafetyRules( + should_censor_nsfw=False, + should_censor_hate_speech=False, + should_censor_violent=False, + should_censor_self_harm=False, + ), + ) + + def test_safety_csam_always_censors( + self, + simple_image_generation_parameters: ImageGenerationParameters, + simple_alchemy_generation_parameters_nsfw_detect: AlchemyParameters, + ) -> None: + """Test that a generation is always censored when CSAM is detected, regardless of safety rules.""" + self._run_censor_test( + generation_parameters=simple_image_generation_parameters, + generation_type=ImageSingleGeneration, + result_id=UUID("00000000-0000-0000-9999-000000000005"), + is_nsfw=False, + is_csam=True, + expect_censored=True, + safety_rules=SafetyRules( + 
should_censor_nsfw=False, + should_censor_hate_speech=False, + should_censor_violent=False, + should_censor_self_harm=False, + ), + ) + + assert simple_alchemy_generation_parameters_nsfw_detect.nsfw_detectors is not None + self._run_censor_test( + generation_parameters=simple_alchemy_generation_parameters_nsfw_detect.nsfw_detectors[0], + generation_type=AlchemySingleGeneration, + result_id=UUID("00000000-0000-0000-8888-000000000005"), + is_nsfw=False, + is_csam=True, + expect_censored=True, + safety_rules=SafetyRules( + should_censor_nsfw=False, + should_censor_hate_speech=False, + should_censor_violent=False, + should_censor_self_harm=False, + ), + ) + + @staticmethod + def shared_check_generation_init( + generation: HordeSingleGeneration[Any], + generation_id: str, + ) -> None: + """Confirm that the `HordeSingleGeneration` was initialized correctly.""" + assert generation.generation_id == generation_id + + first_state, _ = generation._progress_history[0] + assert first_state == GENERATION_PROGRESS.NOT_STARTED + + assert generation._state_error_limits is not None + # assert len(generation._state_error_limits) == 0 # FIXME + assert generation.errored_states is not None + assert len(generation.errored_states) == 0 + + assert generation._safety_results[0] is None + + assert len(generation.generation_results) == 0 + + def test_alchemy_single_generation_init( + self, + id_and_alchemy_generation: tuple[str, AlchemySingleGeneration], + ) -> None: + """Test that an `AlchemySingleGeneration` object can be initialized correctly.""" + + from horde_sdk.worker.consts import default_alchemy_generate_progress_transitions + + generation_id, generation = id_and_alchemy_generation + + TestHordeSingleGeneration.shared_check_generation_init( + generation=generation, + generation_id=generation_id, + ) + + assert generation._generate_progress_transitions == default_alchemy_generate_progress_transitions + + def test_image_single_generation_init( + self, + id_and_image_generation: tuple[str, ImageSingleGeneration], + ) -> None: + """Test that an `ImageSingleGeneration` object can be initialized correctly.""" + + from horde_sdk.worker.consts import default_image_generate_progress_transitions + + generation_id, generation = id_and_image_generation + + TestHordeSingleGeneration.shared_check_generation_init( + generation=generation, + generation_id=generation_id, + ) + + assert generation._generate_progress_transitions == default_image_generate_progress_transitions + + def test_text_single_generation_init( + self, + id_and_text_generation: tuple[str, TextSingleGeneration], + ) -> None: + """Test that a `TextSingleGeneration` object can be initialized correctly.""" + + from horde_sdk.worker.consts import default_text_generate_progress_transitions + + generation_id, generation = id_and_text_generation + + TestHordeSingleGeneration.shared_check_generation_init( + generation=generation, + generation_id=generation_id, + ) + + assert generation._generate_progress_transitions == default_text_generate_progress_transitions + + def test_wrong_order_of_steps( + self, + id_and_image_generation: tuple[str, ImageSingleGeneration], + ) -> None: + """Test that an exception is raised when the generation steps are called in the wrong order. + + - It should only be possible to transition according to the default transition \ + progressions defined in `horde_sdk/worker/consts.py`. + - It should not be possible to transition to the state the generation is already in.
\ + This is a safety check to prevent infinite loops or bad implementations. + """ + + _, generation = id_and_image_generation + + def assert_raises_value_error(func: Callable[..., Any], match: str) -> None: + with pytest.raises(ValueError, match=match): + func() + + assert generation.get_generation_progress() == GENERATION_PROGRESS.NOT_STARTED + + assert_raises_value_error( + generation.on_generation_work_complete, + f"Invalid transition from {GENERATION_PROGRESS.NOT_STARTED} to {GENERATION_PROGRESS.PENDING_SAFETY_CHECK}", + ) + + # Normal progression to preloading + generation.on_preloading() + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING + + assert_raises_value_error( + generation.on_preloading, + f"is already in state {GENERATION_PROGRESS.PRELOADING}", + ) + + assert_raises_value_error( + generation.on_generation_work_complete, + f"Invalid transition from {GENERATION_PROGRESS.PRELOADING} to {GENERATION_PROGRESS.PENDING_SAFETY_CHECK}", + ) + + # Normal progression to preloading complete + generation.on_preloading_complete() + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING_COMPLETE + + assert_raises_value_error( + generation.on_preloading_complete, + f"is already in state {GENERATION_PROGRESS.PRELOADING_COMPLETE}", + ) + + # Normal progression to generating + generation.on_generating() + assert generation.get_generation_progress() == GENERATION_PROGRESS.GENERATING + + assert_raises_value_error( + generation.on_generating, + f"is already in state {GENERATION_PROGRESS.GENERATING}", + ) + + assert_raises_value_error( + generation.on_preloading, + f"Invalid transition from {GENERATION_PROGRESS.GENERATING} to {GENERATION_PROGRESS.PRELOADING}", + ) + + assert_raises_value_error( + generation.on_preloading_complete, + f"Invalid transition from {GENERATION_PROGRESS.GENERATING} to {GENERATION_PROGRESS.PRELOADING_COMPLETE}", + ) + + def test_set_safety_check_result_without_generation_result( + self, + id_and_image_generation: tuple[str, ImageSingleGeneration], + ) -> None: + """Test that an exception is raised when setting a safety check result without setting a generation result.""" + _, generation = id_and_image_generation + + with pytest.raises(ValueError, match="Generation result must be set before setting safety check result"): + generation.on_safety_check_complete( + batch_index=0, + safety_result=ImageSafetyResult( + is_nsfw=False, + is_csam=False, + ), + ) + + def test_reference_run_generation_process_image( + self, + simple_image_generation_parameters: ImageGenerationParameters, + default_testing_image_bytes: bytes, + ) -> None: + """Run a reference generation process from start to finish, without testing-specific magic or helpers. + + The purpose of this test is to have a the bare-minimum usage of the `HordeSingleGeneration` class to ensure + that the most straight forward use-case works as expected and isn't lost in the complexity of the test suite. 
+ """ + from horde_sdk.worker.consts import GENERATION_PROGRESS + from horde_sdk.worker.generations import ImageSingleGeneration + + dummy_id = str(UUID("00000000-0000-0000-0000-000000000000")) + generation = ImageSingleGeneration( + generation_id=dummy_id, + generation_parameters=simple_image_generation_parameters, + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.NOT_STARTED + + generation.on_preloading() + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING + + generation.on_preloading_complete() + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING_COMPLETE + + generation.on_generating() + assert generation.get_generation_progress() == GENERATION_PROGRESS.GENERATING + + generation.on_generation_work_complete() + + generation.set_work_result(default_testing_image_bytes) + + assert generation.get_generation_progress() == GENERATION_PROGRESS.PENDING_SAFETY_CHECK + + generation.on_safety_checking() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SAFETY_CHECKING + + generation.on_safety_check_complete( + batch_index=0, + safety_result=ImageSafetyResult( + is_nsfw=False, + is_csam=False, + ), + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.PENDING_SUBMIT + + generation.on_submitting() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMITTING + + generation.on_submit_complete() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMIT_COMPLETE + + def test_reference_run_generation_process_text( + self, + simple_text_generation_parameters: TextGenerationParameters, + ) -> None: + """Run a reference generation process from start to finish, without testing-specific magic or helpers. + + The purpose of this test is to have a the bare-minimum usage of the `HordeSingleGeneration` class to ensure + that the most straight forward use-case works as expected and isn't lost in the complexity of the test suite. + """ + from horde_sdk.worker.consts import GENERATION_PROGRESS + from horde_sdk.worker.generations import TextSingleGeneration + + dummy_id = str(UUID("00000000-0000-0000-0000-000000000000")) + generation = TextSingleGeneration( + generation_id=dummy_id, + generation_parameters=simple_text_generation_parameters, + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.NOT_STARTED + + generation.on_preloading() + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING + + generation.on_preloading_complete() + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING_COMPLETE + + generation.on_generating() + assert generation.get_generation_progress() == GENERATION_PROGRESS.GENERATING + + generation.on_generation_work_complete() + generation.set_work_result("This is a test") + assert generation.get_generation_progress() == GENERATION_PROGRESS.PENDING_SUBMIT + + generation.on_submitting() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMITTING + + generation.on_submit_complete() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMIT_COMPLETE + + def test_reference_run_generation_process_alchemy( + self, + id_and_alchemy_generation: tuple[str, AlchemySingleGeneration], + default_testing_image_bytes: bytes, + ) -> None: + """Run a reference generation process from start to finish, without testing-specific magic or helpers. 
+
+        The purpose of this test is to have the bare-minimum usage of the `HordeSingleGeneration` class to ensure
+        that the most straightforward use case works as expected and isn't lost in the complexity of the test suite.
+        """
+        from horde_sdk.worker.consts import GENERATION_PROGRESS
+
+        _, generation = id_and_alchemy_generation
+
+        assert generation.get_generation_progress() == GENERATION_PROGRESS.NOT_STARTED
+
+        generation.on_preloading()
+        assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING
+
+        generation.on_preloading_complete()
+        assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING_COMPLETE
+
+        generation.on_post_processing()
+        assert generation.get_generation_progress() == GENERATION_PROGRESS.POST_PROCESSING
+
+        generation.set_work_result(default_testing_image_bytes)
+
+        generation.on_post_processing_complete()
+        assert generation.get_generation_progress() == GENERATION_PROGRESS.PENDING_SUBMIT
+
+        generation.on_submitting()
+        assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMITTING
+
+        generation.on_submit_complete()
+        assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMIT_COMPLETE
+
+    @staticmethod
+    def run_generation_process(
+        generation: HordeSingleGeneration[Any],
+        result: bytes | str,
+        include_preloading: bool,
+        include_generation: bool,
+        include_post_processing: bool,
+        include_safety_check: bool,
+    ) -> None:
+        """Run a generation process from start to finish.
+
+        This function will run the generation process from start to finish, including preloading, generation,
+        post-processing, safety checks, and submission. It will also check that the generation progresses through the
+        correct states.
+
+        If a step is not requested, it will be skipped.
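+
+        Example (illustrative sketch; mirrors the call made in ``tests/worker/test_jobs.py`` within this same
+        change set):
+
+            TestHordeSingleGeneration.run_generation_process(
+                generation=simple_image_worker_job.generation,
+                result=default_testing_image_bytes,
+                include_preloading=True,
+                include_generation=True,
+                include_post_processing=False,
+                include_safety_check=True,
+            )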
+ """ + + from horde_sdk.worker.consts import GENERATION_PROGRESS + + if include_preloading: + assert generation.get_generation_progress() == GENERATION_PROGRESS.NOT_STARTED + + generation.on_preloading() + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING + + generation.on_preloading_complete() + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING_COMPLETE + + if include_generation: + generation.on_generating() + assert generation.get_generation_progress() == GENERATION_PROGRESS.GENERATING + + if include_post_processing: + generation.on_generation_work_complete() + else: + generation.on_generation_work_complete() + generation.set_work_result(result) + + if include_post_processing: + generation.on_post_processing() + assert generation.get_generation_progress() == GENERATION_PROGRESS.POST_PROCESSING + + generation.on_post_processing_complete() + generation.set_work_result(result) + + assert generation.generation_results is not None + assert len(generation.generation_results) == 1 + + generation_result_id, generation_result = generation.generation_results.popitem() + assert isinstance(generation_result_id, UUID | str) + assert generation_result is not None + + if include_safety_check: + assert generation.get_generation_progress() == GENERATION_PROGRESS.PENDING_SAFETY_CHECK + + generation.on_safety_checking() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SAFETY_CHECKING + + generation.on_safety_check_complete( + batch_index=0, + safety_result=SafetyResult( + is_nsfw=False, + is_csam=False, + ), + ) + + assert generation._safety_results[0] is not None + assert generation._safety_results[0].is_nsfw is False + assert generation._safety_results[0].is_csam is False + + assert generation.get_generation_progress() == GENERATION_PROGRESS.PENDING_SUBMIT + + generation.on_submitting() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMITTING + + generation.on_submit_complete() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMIT_COMPLETE + + def test_happy_path_image_start_to_finish( + self, + single_id_str: str, + simple_image_generation_parameters: ImageGenerationParameters, + simple_image_generation_parameters_post_processing: ImageGenerationParameters, + ) -> None: + """Test the happy path for average `ImageSingleGeneration` from start to finish.""" + + generation_no_post_processing = ImageSingleGeneration( + generation_id=single_id_str, + generation_parameters=simple_image_generation_parameters, + ) + self.process_generation( + generation_no_post_processing, + include_preloading=True, + include_safety_check=True, + include_generation=True, + include_post_processing=False, + ) + + generation_with_post_processing = ImageSingleGeneration( + generation_id=single_id_str, + generation_parameters=simple_image_generation_parameters_post_processing, + ) + self.process_generation( + generation_with_post_processing, + include_preloading=True, + include_safety_check=True, + include_generation=True, + include_post_processing=True, + ) + + def test_happy_path_image_no_preloading( + self, + single_id_str: str, + simple_image_generation_parameters: ImageGenerationParameters, + simple_image_generation_parameters_post_processing: ImageGenerationParameters, + ) -> None: + """Test the happy path for average `ImageSingleGeneration` from start to finish without preloading.""" + + generation_no_post_processing = ImageSingleGeneration( + generation_id=single_id_str, + 
generation_parameters=simple_image_generation_parameters, + ) + self.run_generation_process( + generation_no_post_processing, + result=self._shared_image, + include_preloading=False, + include_safety_check=True, + include_generation=True, + include_post_processing=False, + ) + + generation_with_post_processing = ImageSingleGeneration( + generation_id=single_id_str, + generation_parameters=simple_image_generation_parameters_post_processing, + ) + self.run_generation_process( + generation_with_post_processing, + result=self._shared_image, + include_preloading=False, + include_safety_check=True, + include_generation=True, + include_post_processing=True, + ) + + def test_happy_path_alchemy_start_to_finish( + self, + id_and_alchemy_generation: tuple[str, AlchemySingleGeneration], + ) -> None: + """Test the happy path for average `AlchemySingleGeneration` from start to finish.""" + + _, generation = id_and_alchemy_generation + + self.run_generation_process( + generation, + result=self._shared_image, + include_preloading=True, + include_safety_check=False, + include_generation=False, + include_post_processing=True, + ) + + def test_happy_path_alchemy_no_preloading( + self, + id_and_alchemy_generation: tuple[str, AlchemySingleGeneration], + ) -> None: + """Test the happy path for average `AlchemySingleGeneration` from start to finish without preloading.""" + + _, generation = id_and_alchemy_generation + + self.run_generation_process( + generation, + result=self._shared_image, + include_preloading=False, + include_safety_check=False, + include_generation=False, + include_post_processing=True, + ) + + def test_happy_path_text_start_to_finish( + self, + id_and_text_generation: tuple[str, TextSingleGeneration], + ) -> None: + """Test the happy path for average `TextSingleGeneration` from start to finish.""" + + _, generation = id_and_text_generation + + self.run_generation_process( + generation, + result="Fake Text Generation Result", + include_preloading=True, + include_safety_check=False, + include_generation=True, + include_post_processing=False, + ) + + def test_happy_path_text_no_preloading( + self, + id_and_text_generation: tuple[str, TextSingleGeneration], + ) -> None: + """Test the happy path for average `TextSingleGeneration` from start to finish without preloading.""" + + _, generation = id_and_text_generation + + self.run_generation_process( + generation, + result="Fake Text Generation Result", + include_preloading=False, + include_safety_check=False, + include_generation=True, + include_post_processing=False, + ) + + def simulate_hitting_error_limit( + self, + generation: HordeSingleGeneration[Any], + state_to_error_out_on: GENERATION_PROGRESS, + include_preloading: bool, + include_generation: bool, + include_post_processing: bool, + include_safety_check: bool, + include_submit: bool, + ) -> None: + from horde_sdk.worker.consts import GENERATION_PROGRESS + + if state_to_error_out_on == GENERATION_PROGRESS.PRELOADING and not include_preloading: + return + if state_to_error_out_on == GENERATION_PROGRESS.GENERATING and not include_generation: + return + if state_to_error_out_on == GENERATION_PROGRESS.POST_PROCESSING and not include_post_processing: + return + if state_to_error_out_on == GENERATION_PROGRESS.SAFETY_CHECKING and not include_safety_check: + return + if state_to_error_out_on == GENERATION_PROGRESS.SUBMITTING and not include_submit: + return + if state_to_error_out_on == GENERATION_PROGRESS.PENDING_SUBMIT: + return + + if not (include_preloading or include_generation or 
include_post_processing or include_safety_check): + return + + if include_submit and not generation.requires_submit: + return + + if generation.requires_post_processing is not include_post_processing: + return + + if include_preloading: + generation.on_preloading() + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING + if state_to_error_out_on == GENERATION_PROGRESS.PRELOADING: + assert generation._state_error_limits is not None + with pytest.raises(RuntimeError, match="has exceeded the maximum number of errors for state"): + for _ in range(generation._state_error_limits[GENERATION_PROGRESS.PRELOADING]): + generation.on_error( + failed_message="Failed to preload", + failure_exception=Exception("Failed to preload exception"), + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.ERROR + generation.step(GENERATION_PROGRESS.PRELOADING) + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING + + return + generation.on_preloading_complete() + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING_COMPLETE + + if include_generation: + generation.on_generating() + assert generation.get_generation_progress() == GENERATION_PROGRESS.GENERATING + + if state_to_error_out_on == GENERATION_PROGRESS.GENERATING: + with pytest.raises(RuntimeError, match="has exceeded the maximum number of errors for state"): + assert generation._state_error_limits is not None + for _ in range(generation._state_error_limits[GENERATION_PROGRESS.GENERATING]): + generation.on_error( + failed_message="Failed to generate", + failure_exception=Exception("Failed to generate exception"), + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.ERROR + generation.step(GENERATION_PROGRESS.GENERATING) + assert generation.get_generation_progress() == GENERATION_PROGRESS.GENERATING + + return + + generation.on_generation_work_complete() + + if include_post_processing: + generation.on_post_processing() + assert generation.get_generation_progress() == GENERATION_PROGRESS.POST_PROCESSING + + if state_to_error_out_on == GENERATION_PROGRESS.POST_PROCESSING: + with pytest.raises(RuntimeError, match="has exceeded the maximum number of errors for state"): + assert generation._state_error_limits is not None + for _ in range(generation._state_error_limits[GENERATION_PROGRESS.POST_PROCESSING]): + generation.on_error( + failed_message="Failed to post-process", + failure_exception=Exception("Failed to post-process exception"), + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.ERROR + generation.step(GENERATION_PROGRESS.POST_PROCESSING) + assert generation.get_generation_progress() == GENERATION_PROGRESS.POST_PROCESSING + + return + + generation.on_post_processing_complete() + if generation._result_type is bytes: + generation.set_work_result(self._shared_image) + elif generation._result_type is str: + generation.set_work_result("Fake Text Generation Result") + else: + if generation._result_type is bytes: + generation.set_work_result(self._shared_image) + elif generation._result_type is str: + generation.set_work_result("Fake Text Generation Result") + + if include_safety_check: + generation.on_safety_checking() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SAFETY_CHECKING + + if state_to_error_out_on == GENERATION_PROGRESS.SAFETY_CHECKING: + with pytest.raises(RuntimeError, match="has exceeded the maximum number of errors for state"): + assert generation._state_error_limits is not None + for _ in 
range(generation._state_error_limits[GENERATION_PROGRESS.SAFETY_CHECKING]): + generation.on_error( + failed_message="Failed to safety check", + failure_exception=Exception("Failed to safety check exception"), + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.ERROR + generation.step(GENERATION_PROGRESS.SAFETY_CHECKING) + assert generation.get_generation_progress() == GENERATION_PROGRESS.SAFETY_CHECKING + + return + + generation.on_safety_check_complete( + batch_index=0, + safety_result=SafetyResult( + is_nsfw=False, + is_csam=False, + ), + ) + assert generation._safety_results[0] is not None + assert generation._safety_results[0].is_nsfw is False + assert generation._safety_results[0].is_csam is False + + if include_submit: + generation.on_submitting() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMITTING + + if state_to_error_out_on == GENERATION_PROGRESS.SUBMITTING: + with pytest.raises(RuntimeError, match="has exceeded the maximum number of errors for state"): + assert generation._state_error_limits is not None + for _ in range(generation._state_error_limits[GENERATION_PROGRESS.SUBMITTING]): + generation.on_error( + failed_message="Failed to submit", + failure_exception=Exception("Failed to submit exception"), + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.ERROR + generation.step(GENERATION_PROGRESS.SUBMITTING) + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMITTING + + return + + generation.on_submit_complete() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMIT_COMPLETE + + def simulate_abort( + self, + generation: HordeSingleGeneration[Any], + state_to_abort_on: GENERATION_PROGRESS, + include_preloading: bool, + include_generation: bool, + include_post_processing: bool, + include_safety_check: bool, + include_submit: bool, + error_message: str, + error_exception: Exception, + ) -> None: + """Simulate aborting a generation. + + This function will simulate aborting a generation by calling the `on_abort` method on the generation object. + It will also check that the generation progresses through the correct states. + + If a step is not requested, it will be skipped. 
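+
+        Example (illustrative sketch; mirrors the call made in `run_generation_abort_and_error_tests`, with
+        concrete values substituted for the loop variables):
+
+            self.simulate_abort(
+                generation=abort_generation,
+                state_to_abort_on=GENERATION_PROGRESS.GENERATING,
+                include_preloading=True,
+                include_generation=True,
+                include_post_processing=False,
+                include_safety_check=True,
+                include_submit=True,
+                error_message="Simulated error message",
+                error_exception=Exception("Simulated error exception"),
+            )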
+ """ + from horde_sdk.worker.consts import GENERATION_PROGRESS + + if state_to_abort_on == GENERATION_PROGRESS.PRELOADING and not include_preloading: + return + if state_to_abort_on == GENERATION_PROGRESS.GENERATING and not include_generation: + return + if state_to_abort_on == GENERATION_PROGRESS.POST_PROCESSING and not include_post_processing: + return + if state_to_abort_on == GENERATION_PROGRESS.SAFETY_CHECKING and not include_safety_check: + return + if state_to_abort_on == GENERATION_PROGRESS.SUBMITTING and not include_submit: + return + + if not (include_preloading or include_generation or include_post_processing or include_safety_check): + return + + if include_submit and not generation.requires_submit: + return + + if state_to_abort_on == GENERATION_PROGRESS.NOT_STARTED: + generation.on_abort( + failed_message=error_message, + failure_exception=error_exception, + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.ABORTED + return + + if include_preloading: + generation.on_preloading() + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING + + if state_to_abort_on == GENERATION_PROGRESS.PRELOADING: + generation.on_abort( + failed_message=error_message, + failure_exception=error_exception, + ) + + assert generation.get_generation_progress() == GENERATION_PROGRESS.ABORTED + return + + generation.on_preloading_complete() + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING_COMPLETE + + if include_generation: + generation.on_generating() + assert generation.get_generation_progress() == GENERATION_PROGRESS.GENERATING + + if state_to_abort_on == GENERATION_PROGRESS.GENERATING: + generation.on_abort( + failed_message=error_message, + failure_exception=error_exception, + ) + + assert generation.get_generation_progress() == GENERATION_PROGRESS.ABORTED + return + + generation.on_generation_work_complete() + + if not include_post_processing: + if generation.result_type is bytes: + generation.set_work_result(self._shared_image) + elif generation.result_type is str: + generation.set_work_result("Fake Text Generation Result") + + if include_post_processing: + generation.on_post_processing() + assert generation.get_generation_progress() == GENERATION_PROGRESS.POST_PROCESSING + + if state_to_abort_on == GENERATION_PROGRESS.POST_PROCESSING: + generation.on_abort( + failed_message=error_message, + failure_exception=error_exception, + ) + + assert generation.get_generation_progress() == GENERATION_PROGRESS.ABORTED + return + + generation.on_post_processing_complete() + if generation._result_type is bytes: + generation.set_work_result(self._shared_image) + elif generation._result_type is str: + generation.set_work_result("Fake Text Generation Result") + + if include_safety_check: + generation.on_safety_checking() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SAFETY_CHECKING + if state_to_abort_on == GENERATION_PROGRESS.SAFETY_CHECKING: + generation.on_abort( + failed_message=error_message, + failure_exception=error_exception, + ) + + assert generation.get_generation_progress() == GENERATION_PROGRESS.ABORTED + return + generation.on_safety_check_complete( + batch_index=0, + safety_result=SafetyResult( + is_nsfw=False, + is_csam=False, + ), + ) + assert generation._safety_results[0] is not None + assert generation._safety_results[0].is_nsfw is False + assert generation._safety_results[0].is_csam is False + + if include_submit: + generation.on_submitting() + assert generation.get_generation_progress() == 
GENERATION_PROGRESS.SUBMITTING + + if state_to_abort_on == GENERATION_PROGRESS.SUBMITTING: + generation.on_abort( + failed_message=error_message, + failure_exception=error_exception, + ) + + assert generation.get_generation_progress() == GENERATION_PROGRESS.ABORTED + return + + generation.on_submit_complete() + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMIT_COMPLETE + + if state_to_abort_on == GENERATION_PROGRESS.SUBMIT_COMPLETE: + generation.on_abort( + failed_message=error_message, + failure_exception=error_exception, + ) + + assert generation.get_generation_progress() == GENERATION_PROGRESS.ABORTED + return + + @staticmethod + def handle_error( + generation: HordeSingleGeneration[Any], + error_message: str, + error_exception: Exception, + errors_count: int, + ) -> None: + generation.on_error( + failed_message=error_message, + failure_exception=error_exception, + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.ERROR + + def process_generation( + self, + generation: HordeSingleGeneration[Any], + include_preloading: bool, + include_generation: bool, + include_post_processing: bool, + include_safety_check: bool, + include_submit: bool = True, + error_on_preloading: bool = False, + error_on_generation: bool = False, + error_on_post_processing: bool = False, + error_on_safety_check: bool = False, + error_on_submit: bool = False, + ) -> None: + """Process a generation with the given configurations. + + This will step the `HordeSingleGeneration` through the entire generation process, as requested by the + arguments. If an error is requested, the generation will be marked as errored and the error count will be + incremented. + + """ + if error_on_preloading and not include_preloading: + return + if error_on_generation and not include_generation: + return + if error_on_post_processing and not include_post_processing: + return + if error_on_safety_check and not include_safety_check: + return + if error_on_submit and not include_submit: + return + + error_flags = { + "preloading": error_on_preloading and include_preloading, + "generation": error_on_generation and include_generation, + "post_processing": error_on_post_processing + and include_post_processing + and generation.requires_post_processing, + "safety_check": error_on_safety_check and include_safety_check, + "submit": error_on_submit, + } + + target_errors_count = sum(error_flags.values()) + errors_count = 0 + + if include_preloading: + errors_count = self._simulate_preloading(generation, error_on_preloading, errors_count) + + if include_generation: + errors_count = self._simulate_generation( + generation, + error_on_generation=error_on_generation, + include_post_processing=include_post_processing, + errors_count=errors_count, + ) + + if include_post_processing and generation.requires_post_processing: + errors_count = self._simulate_post_processing( + generation, + error_on_post_processing=error_on_post_processing, + errors_count=errors_count, + ) + + if include_safety_check: + errors_count = self._simulate_safety_check( + generation, + error_on_safety_check=error_on_safety_check, + errors_count=errors_count, + ) + + if include_submit: + errors_count = self._simulate_submission( + generation, + error_on_submit=error_on_submit, + errors_count=errors_count, + ) + elif not include_submit and not generation.requires_submit: + with pytest.raises( + ValueError, + match="Invalid transition from ", + ): + generation.on_submitting() + + assert generation.generation_failure_count == 
target_errors_count + + def _set_and_confirm_work_result( + self, + generation: HordeSingleGeneration[Any], + ) -> None: + if generation.result_type is bytes: + generation.set_work_result(self._shared_image) + + assert generation.generation_results is not None + assert len(generation.generation_results) == 1 + result_id, result = generation.generation_results.popitem() + assert isinstance(result_id, UUID | str) + assert result == self._shared_image + elif generation.result_type is str: + fake_result = "Fake Text Generation Result" + generation.set_work_result(fake_result) + assert generation.generation_results is not None + assert len(generation.generation_results) == 1 + result_id, result = generation.generation_results.popitem() + assert isinstance(result_id, UUID | str) + assert result == fake_result + else: + raise ValueError(f"Unknown result type {generation.result_type}") + + def _simulate_preloading( + self, + generation: HordeSingleGeneration[Any], + error_on_preloading: bool, + errors_count: int, + ) -> int: + """Simulate expected actions for the preloading step for a `HordeSingleGeneration`.""" + + from horde_sdk.worker.consts import GENERATION_PROGRESS + + assert generation.get_generation_progress() == GENERATION_PROGRESS.NOT_STARTED + + assert generation.step(GENERATION_PROGRESS.PRELOADING) == GENERATION_PROGRESS.PRELOADING + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING + + if error_on_preloading: + errors_count += 1 + assert ( + generation.on_error( + failed_message="Failed to preload", + failure_exception=Exception("Failed to preload exception"), + ) + == GENERATION_PROGRESS.ERROR + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.ERROR + + assert generation.step(GENERATION_PROGRESS.PRELOADING) == GENERATION_PROGRESS.PRELOADING + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING + + assert generation.generation_failure_count == errors_count + assert generation.errored_states is not None + error_state, error_time = generation.errored_states[-1] + assert error_state == GENERATION_PROGRESS.PRELOADING + assert error_time != 0 + + assert generation.on_preloading_complete() == GENERATION_PROGRESS.PRELOADING_COMPLETE + assert generation.get_generation_progress() == GENERATION_PROGRESS.PRELOADING_COMPLETE + + return errors_count + + def _simulate_generation( + self, + generation: HordeSingleGeneration[Any], + error_on_generation: bool, + include_post_processing: bool, + errors_count: int, + ) -> int: + """Simulate expected actions for the generation step for a `HordeSingleGeneration`.""" + + from horde_sdk.worker.consts import GENERATION_PROGRESS + + assert generation.step(GENERATION_PROGRESS.GENERATING) == GENERATION_PROGRESS.GENERATING + assert generation.get_generation_progress() == GENERATION_PROGRESS.GENERATING + + if error_on_generation: + errors_count += 1 + assert ( + generation.on_error( + failed_message="Failed to generate", + failure_exception=Exception("Failed to generate exception"), + ) + == GENERATION_PROGRESS.ERROR + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.ERROR + + assert generation.step(GENERATION_PROGRESS.GENERATING) == GENERATION_PROGRESS.GENERATING + assert generation.get_generation_progress() == GENERATION_PROGRESS.GENERATING + + assert generation.generation_failure_count == errors_count + assert generation.errored_states is not None + error_state, error_time = generation.errored_states[-1] + assert error_state == GENERATION_PROGRESS.GENERATING + 
assert error_time != 0 + + if include_post_processing and generation.requires_post_processing: + assert generation.on_generation_work_complete() == GENERATION_PROGRESS.PENDING_POST_PROCESSING + else: + assert generation.on_generation_work_complete() in ( + GENERATION_PROGRESS.PENDING_SUBMIT, + GENERATION_PROGRESS.PENDING_SAFETY_CHECK, + GENERATION_PROGRESS.PENDING_POST_PROCESSING, + GENERATION_PROGRESS.COMPLETE, + ) + + self._set_and_confirm_work_result(generation) + + return errors_count + + def _simulate_post_processing( + self, + generation: HordeSingleGeneration[Any], + error_on_post_processing: bool, + errors_count: int, + ) -> int: + """Simulate expected actions for the post-processing step for a `HordeSingleGeneration`.""" + + from horde_sdk.worker.consts import GENERATION_PROGRESS + + assert generation.step(GENERATION_PROGRESS.POST_PROCESSING) == GENERATION_PROGRESS.POST_PROCESSING + assert generation.get_generation_progress() == GENERATION_PROGRESS.POST_PROCESSING + + if error_on_post_processing: + errors_count += 1 + assert ( + generation.on_error( + failed_message="Failed during post-processing", + failure_exception=Exception("Failed during post-processing exception"), + ) + == GENERATION_PROGRESS.ERROR + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.ERROR + + generation.step(GENERATION_PROGRESS.POST_PROCESSING) + assert generation.get_generation_progress() == GENERATION_PROGRESS.POST_PROCESSING + + assert generation.on_post_processing_complete() in ( + GENERATION_PROGRESS.PENDING_SUBMIT, + GENERATION_PROGRESS.PENDING_SAFETY_CHECK, + GENERATION_PROGRESS.COMPLETE, + ) + + self._set_and_confirm_work_result(generation) + + return errors_count + + def _simulate_safety_check( + self, + generation: HordeSingleGeneration[Any], + error_on_safety_check: bool, + errors_count: int, + ) -> int: + """Simulate expected actions for the safety check step for a `HordeSingleGeneration`.""" + from horde_sdk.worker.consts import GENERATION_PROGRESS + + assert generation.get_generation_progress() == GENERATION_PROGRESS.PENDING_SAFETY_CHECK + + assert generation.step(GENERATION_PROGRESS.SAFETY_CHECKING) == GENERATION_PROGRESS.SAFETY_CHECKING + assert generation.get_generation_progress() == GENERATION_PROGRESS.SAFETY_CHECKING + + if error_on_safety_check: + errors_count += 1 + assert ( + generation.on_error( + failed_message="Failed during safety check", + failure_exception=Exception("Failed during safety check exception"), + ) + == GENERATION_PROGRESS.ERROR + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.ERROR + + assert generation.step(GENERATION_PROGRESS.SAFETY_CHECKING) == GENERATION_PROGRESS.SAFETY_CHECKING + assert generation.get_generation_progress() == GENERATION_PROGRESS.SAFETY_CHECKING + + generation.on_safety_check_complete( + batch_index=0, + safety_result=SafetyResult( + is_nsfw=False, + is_csam=False, + ), + ) + assert generation._safety_results[0] is not None + assert generation._safety_results[0].is_nsfw is False + assert generation._safety_results[0].is_csam is False + + return errors_count + + def _simulate_submission( + self, + generation: HordeSingleGeneration[Any], + error_on_submit: bool, + errors_count: int, + ) -> int: + """Simulate expected actions for the submission step for a `HordeSingleGeneration`.""" + from horde_sdk.worker.consts import GENERATION_PROGRESS + + assert generation.get_generation_progress() == GENERATION_PROGRESS.PENDING_SUBMIT + + assert generation.step(GENERATION_PROGRESS.SUBMITTING) == 
GENERATION_PROGRESS.SUBMITTING + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMITTING + + if error_on_submit: + errors_count += 1 + assert ( + generation.on_error( + failed_message="Failed during submission", + failure_exception=Exception("Failed during submission exception"), + ) + == GENERATION_PROGRESS.ERROR + ) + assert generation.get_generation_progress() == GENERATION_PROGRESS.ERROR + + assert generation.step(GENERATION_PROGRESS.SUBMITTING) == GENERATION_PROGRESS.SUBMITTING + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMITTING + + assert generation.on_submit_complete() == GENERATION_PROGRESS.SUBMIT_COMPLETE + assert generation.get_generation_progress() == GENERATION_PROGRESS.SUBMIT_COMPLETE + + return errors_count + + def run_generation_abort_and_error_tests( + self, + generation_class: type[ImageSingleGeneration | TextSingleGeneration | AlchemySingleGeneration], + generation_id_factory: Callable[[], str], + generation_parameters: ImageGenerationParameters | TextGenerationParameters | SingleAlchemyParameters, + include_generation: bool, + include_post_processing: bool, + include_safety_check: bool, + include_submit: bool, + ) -> None: + states_to_test = list(GENERATION_PROGRESS.__members__.values()) + states_to_test = [s for s in states_to_test if s not in generation_class.default_interrupt_states()] + states_to_test.remove(GENERATION_PROGRESS.ERROR) + + for state in states_to_test: + if self.is_conflicting_permutations( + generation_class_requires_generation=generation_class.does_class_require_generation(), + generation_requires_post_processing=include_post_processing, + generation_requires_submit=include_submit, + include_generation=include_generation, + include_post_processing=include_post_processing, + include_submit=include_submit, + ): + continue + + abort_generation = self.create_generation_instance( + generation_class, + generation_id_factory(), + generation_parameters, + requires_generation=include_generation, + requires_post_processing=include_post_processing, + requires_safety_check=include_safety_check, + requires_submit=include_submit, + strict_transition_mode=True, + extra_logging=False, + ) + + if abort_generation.requires_post_processing is not include_post_processing: + continue + + self.simulate_abort( + generation=abort_generation, + state_to_abort_on=state, + include_preloading=True, + include_generation=include_generation, + include_post_processing=include_post_processing, + include_safety_check=include_safety_check, + include_submit=include_submit, + error_message="Simulated error message", + error_exception=Exception("Simulated error exception"), + ) + + generation_strict = self.create_generation_instance( + generation_class, + generation_id_factory(), + generation_parameters, + requires_generation=include_generation, + requires_post_processing=include_post_processing, + requires_safety_check=include_safety_check, + requires_submit=include_submit, + strict_transition_mode=True, + extra_logging=False, + ) + self.simulate_hitting_error_limit( + generation=generation_strict, + state_to_error_out_on=state, + include_preloading=True, + include_generation=include_generation, + include_post_processing=include_post_processing, + include_safety_check=include_safety_check, + include_submit=include_submit, + ) + + generation_non_strict = self.create_generation_instance( + generation_class, + generation_id_factory(), + generation_parameters, + requires_generation=include_generation, + 
requires_post_processing=include_post_processing,
+                requires_safety_check=include_safety_check,
+                requires_submit=include_submit,
+                strict_transition_mode=False,
+                extra_logging=False,
+            )
+
+            self.simulate_hitting_error_limit(
+                generation=generation_non_strict,
+                state_to_error_out_on=state,
+                include_preloading=True,
+                include_generation=include_generation,
+                include_post_processing=include_post_processing,
+                include_safety_check=include_safety_check,
+                include_submit=include_submit,
+            )
+
+    def run_generation_test_permutations(
+        self,
+        generation_class: type[ImageSingleGeneration | TextSingleGeneration | AlchemySingleGeneration],
+        generation_id_factory: Callable[[], str],
+        permutations: list[GenerationPermutation],
+        error_on_preloading: bool,
+        error_on_generation: bool,
+        error_on_post_processing: bool,
+        error_on_safety_check: bool,
+        error_on_submit: bool,
+    ) -> None:
+        """Run permutations of generation configurations.
+
+        Additionally ensures that error limits are respected and that aborting a generation works as expected.
+
+        See the docstring for `GenerationPermutation` for more information on the possible configurations.
+
+        Args:
+            generation_class (type[HordeSingleGeneration[Any]]): The generation class to test.
+            generation_id_factory (Callable[[], str]): A factory function to create unique generation IDs.
+            permutations (list[GenerationPermutation]): A list of permutations to test.
+            error_on_preloading (bool): Whether to simulate an error during preloading.
+            error_on_generation (bool): Whether to simulate an error during generation.
+            error_on_post_processing (bool): Whether to simulate an error during post-processing.
+            error_on_safety_check (bool): Whether to simulate an error during safety check.
+            error_on_submit (bool): Whether to simulate an error during submission.
+        """
+        for permutation in permutations:
+            generation: HordeSingleGeneration[Any]
+
+            generation = self.create_generation_instance_for_permutation(
+                generation_class,
+                generation_id_factory,
+                permutation.include_submit,
+                permutation,
+            )
+
+            self.process_generation(
+                generation,
+                include_generation=permutation.include_generation,
+                include_safety_check=permutation.include_safety_check,
+                include_preloading=permutation.include_preloading,
+                include_post_processing=permutation.include_post_processing,
+                include_submit=permutation.include_submit,
+                error_on_preloading=error_on_preloading,
+                error_on_generation=error_on_generation,
+                error_on_post_processing=error_on_post_processing,
+                error_on_safety_check=error_on_safety_check,
+                error_on_submit=error_on_submit,
+            )
+
+    @staticmethod
+    def is_conflicting_permutations(
+        generation_class_requires_generation: bool,
+        generation_requires_post_processing: bool,
+        generation_requires_submit: bool,
+        include_generation: bool,
+        include_post_processing: bool,
+        include_submit: bool,
+    ) -> bool:
+        """Check if the given generation requirements and requested configuration are conflicting.
+
+        Args:
+            generation_class_requires_generation (bool): Whether the generation class requires the generation step.
+            generation_requires_post_processing (bool): Whether the generation requires the post-processing step.
+            generation_requires_submit (bool): Whether the generation requires the submit step.
+            include_generation (bool): Whether to force include the generation step.
+            include_post_processing (bool): Whether to force include the post-processing step.
+            include_submit (bool): Whether to force include the submit step.
+
+        Returns:
+            bool: True if the configurations are conflicting, False otherwise.
+        """
+        if not generation_class_requires_generation and not include_generation:
+            return True
+
+        if generation_class_requires_generation and not include_generation:
+            return True
+
+        if not include_generation and not include_post_processing:
+            return True
+
+        if include_post_processing is not generation_requires_post_processing:
+            return True
+
+        if include_submit is not generation_requires_submit:
+            return True
+
+        return bool(not (include_generation or include_post_processing))
+
+    @staticmethod
+    def create_generation_instance(
+        generation_class: type[ImageSingleGeneration | TextSingleGeneration | AlchemySingleGeneration],
+        generation_id: str,
+        generation_parameters: ImageGenerationParameters | TextGenerationParameters | SingleAlchemyParameters,
+        requires_generation: bool,
+        requires_post_processing: bool,
+        requires_safety_check: bool,
+        requires_submit: bool,
+        strict_transition_mode: bool,
+        extra_logging: bool = False,
+    ) -> HordeSingleGeneration[Any]:
+        """Create a generation instance for the given class and parameters.
+
+        Args:
+            generation_class (type[HordeSingleGeneration[Any]]): The generation class to create.
+            generation_id (str): The ID of the generation.
+            generation_parameters (ImageGenerationParameters | TextGenerationParameters | SingleAlchemyParameters):
+                The parameters for the generation.
+            requires_generation (bool): Whether the generation requires the generation step.
+            requires_post_processing (bool): Whether the generation requires post-processing.
+            requires_safety_check (bool): Whether the generation requires a safety check.
+            requires_submit (bool): Whether the generation requires submission.
+            strict_transition_mode (bool): Whether to use strict transition mode.
+            extra_logging (bool, optional): Whether to enable extra logging. Defaults to False.
+
+        Returns:
+            HordeSingleGeneration[Any]: The created generation instance.
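+
+        Example (illustrative sketch; mirrors the calls made in `run_generation_abort_and_error_tests`, with the
+        `single_id_str` and `simple_image_generation_parameters` fixtures standing in for the first arguments):
+
+            generation = TestHordeSingleGeneration.create_generation_instance(
+                ImageSingleGeneration,
+                single_id_str,
+                simple_image_generation_parameters,
+                requires_generation=True,
+                requires_post_processing=False,
+                requires_safety_check=True,
+                requires_submit=True,
+                strict_transition_mode=True,
+                extra_logging=False,
+            )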
+ """ + + generation: HordeSingleGeneration[Any] + + if generation_class == ImageSingleGeneration: + assert isinstance(generation_parameters, ImageGenerationParameters) + generation = ImageSingleGeneration( + generation_id=generation_id, + generation_parameters=generation_parameters, + requires_submit=requires_submit, + extra_logging=extra_logging, + strict_transition_mode=strict_transition_mode, + ) + elif generation_class == AlchemySingleGeneration: + assert isinstance(generation_parameters, SingleAlchemyParameters) + generation = AlchemySingleGeneration( + generation_id=generation_id, + generation_parameters=generation_parameters, + requires_generation=requires_generation, + requires_post_processing=requires_post_processing, + requires_safety_check=requires_safety_check, + requires_submit=requires_submit, + extra_logging=extra_logging, + strict_transition_mode=strict_transition_mode, + ) + elif generation_class == TextSingleGeneration: + assert isinstance(generation_parameters, TextGenerationParameters) + generation = TextSingleGeneration( + generation_id=generation_id, + generation_parameters=generation_parameters, + requires_post_processing=requires_post_processing, + requires_safety_check=requires_safety_check, + requires_submit=requires_submit, + extra_logging=extra_logging, + strict_transition_mode=strict_transition_mode, + ) + else: + raise ValueError(f"Unknown generation class: {generation_class}") + + return generation + + def create_generation_instance_for_permutation( + self, + generation_class: type[ImageSingleGeneration | TextSingleGeneration | AlchemySingleGeneration], + generation_id_factory: Callable[[], str], + include_submit: bool, + permutation: GenerationPermutation, + strict_transition_mode: bool = True, + ) -> HordeSingleGeneration[Any]: + generation: HordeSingleGeneration[Any] = self.create_generation_instance( + generation_class, + generation_id=generation_id_factory(), + generation_parameters=permutation.underlying_payload, + requires_generation=permutation.include_generation, + requires_post_processing=permutation.include_post_processing, + requires_safety_check=permutation.include_safety_check, + requires_submit=include_submit, + strict_transition_mode=strict_transition_mode, + extra_logging=False, + ) + + return generation + + @pytest.mark.parametrize( + "generation_class", + [ + ImageSingleGeneration, + AlchemySingleGeneration, + TextSingleGeneration, + ], + ) + def test_error_handling( + self, + generation_class: type[ImageSingleGeneration | TextSingleGeneration | AlchemySingleGeneration], + id_factory_str: Callable[[], str], + image_permutations: list[GenerationPermutation], + alchemy_permutations: list[GenerationPermutation], + text_permutations: list[GenerationPermutation], + ) -> None: + """Test error handling for all permutations of generation configurations. + + This function is one of the heaviest in the test suite but does a lot of heavy + lifting in terms of making sure all of the intended generation paths are covered. 
+ """ + + from collections import namedtuple + + ErrorPermutation = namedtuple( + "ErrorPermutation", + [ + "error_on_preloading", + "error_on_generation", + "error_on_post_processing", + "error_on_safety_check", + "error_on_submit", + ], + ) + + error_permutations = [ + ErrorPermutation( + error_on_preloading, + error_on_generation, + error_on_post_processing, + error_on_safety_check, + error_on_submit, + ) + for error_on_preloading in [True, False] + for error_on_generation in [True, False] + for error_on_post_processing in [True, False] + for error_on_safety_check in [True, False] + for error_on_submit in [True, False] + ] + + permutations_map: dict[ + type[ImageSingleGeneration] | type[AlchemySingleGeneration] | type[TextSingleGeneration], + list[GenerationPermutation], + ] + permutations_map = { + ImageSingleGeneration: image_permutations, + AlchemySingleGeneration: alchemy_permutations, + TextSingleGeneration: text_permutations, + } + + for generation_type, permutations in permutations_map.items(): + for permutation in permutations: + self.run_generation_abort_and_error_tests( + generation_class=generation_type, + generation_id_factory=id_factory_str, + generation_parameters=permutation.underlying_payload, + include_generation=permutation.include_generation, + include_post_processing=permutation.include_post_processing, + include_safety_check=permutation.include_safety_check, + include_submit=permutation.include_submit, + ) + + for error_permutation in error_permutations: + permutations_list = permutations_map.get(generation_class) + if permutations_list is None: + raise ValueError(f"Permutations not found for {generation_class.__name__}") + try: + self.run_generation_test_permutations( + generation_class, + id_factory_str, + permutations_list, + error_on_preloading=error_permutation.error_on_preloading, + error_on_generation=error_permutation.error_on_generation, + error_on_post_processing=error_permutation.error_on_post_processing, + error_on_safety_check=error_permutation.error_on_safety_check, + error_on_submit=error_permutation.error_on_submit, + ) + except Exception as e: + logger.exception(f"Error running permutations for {generation_class.__name__}") + logger.exception(f"Error permutation: {error_permutation}") + logger.exception(f"Generation permutations: {permutations_list}") + + raise e diff --git a/tests/worker/test_jobs.py b/tests/worker/test_jobs.py new file mode 100644 index 00000000..ad9e235f --- /dev/null +++ b/tests/worker/test_jobs.py @@ -0,0 +1,565 @@ +from uuid import uuid4 + +import pytest + +from horde_sdk.generation_parameters import ( + AlchemyParameters, +) +from horde_sdk.generation_parameters.alchemy.consts import KNOWN_ALCHEMY_FORMS +from horde_sdk.generation_parameters.alchemy.object_models import ( + UpscaleAlchemyParametersTemplate, +) +from horde_sdk.generation_parameters.image.object_models import ( + BasicImageGenerationParametersTemplate, + ImageGenerationParametersTemplate, +) +from horde_sdk.generation_parameters.text.object_models import ( + BasicTextGenerationParametersTemplate, + TextGenerationParametersTemplate, +) +from horde_sdk.generation_parameters.utils import ResultIdAllocator +from horde_sdk.worker.consts import GENERATION_PROGRESS, WORKER_ERRORS +from horde_sdk.worker.generations import AlchemySingleGeneration, ImageSingleGeneration, TextSingleGeneration +from horde_sdk.worker.jobs import AlchemyWorkerJob, ImageWorkerJob, TextWorkerJob + + +@pytest.fixture(scope="function") +def simple_image_worker_job( + simple_image_generation: 
ImageSingleGeneration, +) -> ImageWorkerJob: + return ImageWorkerJob( + generation=simple_image_generation, + ) + + +def test_init_image_worker_job( + simple_image_worker_job: ImageWorkerJob, +) -> None: + assert simple_image_worker_job is not None + assert simple_image_worker_job.generation is not None + assert simple_image_worker_job.generation_cls is ImageSingleGeneration + + +def test_init_worker_job_end_to_end_happy_path( + simple_image_worker_job: ImageWorkerJob, + default_testing_image_bytes: bytes, +) -> None: + from .test_horde_single_generations import TestHordeSingleGeneration + + TestHordeSingleGeneration() + + TestHordeSingleGeneration.run_generation_process( + generation=simple_image_worker_job.generation, + result=default_testing_image_bytes, + include_preloading=True, + include_generation=True, + include_post_processing=False, + include_safety_check=True, + ) + + +def test_image_worker_job_lifecycle_preserves_dispatch_result_ids( + simple_image_generation: ImageSingleGeneration, + default_testing_image_bytes: bytes, +) -> None: + from .test_horde_single_generations import TestHordeSingleGeneration + + dispatch_ids = ("dispatch-image-1", uuid4()) + job = ImageWorkerJob( + generation=simple_image_generation, + dispatch_result_ids=dispatch_ids, + ) + + TestHordeSingleGeneration() + + TestHordeSingleGeneration.run_generation_process( + generation=job.generation, + result=default_testing_image_bytes, + include_preloading=True, + include_generation=True, + include_post_processing=False, + include_safety_check=True, + ) + + assert job.generation.dispatch_result_ids == list(dispatch_ids) + + +def test_image_worker_job_lifecycle_uses_dispatch_job_id( + simple_image_generation: ImageSingleGeneration, + default_testing_image_bytes: bytes, +) -> None: + from .test_horde_single_generations import TestHordeSingleGeneration + + job = ImageWorkerJob( + generation=simple_image_generation, + dispatch_job_id="dispatch-image-job", + ) + + TestHordeSingleGeneration() + + TestHordeSingleGeneration.run_generation_process( + generation=job.generation, + result=default_testing_image_bytes, + include_preloading=True, + include_generation=True, + include_post_processing=False, + include_safety_check=True, + ) + + assert job.generation.dispatch_result_ids == ["dispatch-image-job"] + + +def test_image_job_abort_immediately( + simple_image_worker_job: ImageWorkerJob, +) -> None: + assert simple_image_worker_job is not None + assert simple_image_worker_job.generation is not None + assert simple_image_worker_job.generation_cls is ImageSingleGeneration + + simple_image_worker_job.set_job_faulted( + faulted_reason=WORKER_ERRORS.UNHANDLED_EXCEPTION, + failure_exception=Exception("Test faulted exception"), + ) + + +def test_image_job_abort_during_generation( + simple_image_worker_job: ImageWorkerJob, +) -> None: + assert simple_image_worker_job is not None + assert simple_image_worker_job.generation is not None + assert simple_image_worker_job.generation_cls is ImageSingleGeneration + + generation = simple_image_worker_job.generation + + assert generation.on_generating() == GENERATION_PROGRESS.GENERATING + + assert generation.on_abort( + failed_message=WORKER_ERRORS.UNHANDLED_EXCEPTION, + failure_exception=Exception("Test faulted exception"), + ) + + +@pytest.fixture(scope="function") +def simple_alchemy_job( + simple_alchemy_generation_parameters: AlchemyParameters, +) -> AlchemyWorkerJob: + assert len(simple_alchemy_generation_parameters.all_alchemy_operations) == 1 + alchemy_single_generation_parameters = 
simple_alchemy_generation_parameters.all_alchemy_operations[0] + return AlchemyWorkerJob( + generation=AlchemySingleGeneration( + generation_parameters=alchemy_single_generation_parameters, + ), + ) + + +def test_init_alchemy_job( + simple_alchemy_job: AlchemyWorkerJob, +) -> None: + assert simple_alchemy_job is not None + assert simple_alchemy_job.generation is not None + assert simple_alchemy_job.generation_cls is AlchemySingleGeneration + + +def test_init_alchemy_worker_job_end_to_end_happy_path( + simple_alchemy_job: AlchemyWorkerJob, + default_testing_image_bytes: bytes, +) -> None: + from .test_horde_single_generations import TestHordeSingleGeneration + + TestHordeSingleGeneration() + + TestHordeSingleGeneration.run_generation_process( + generation=simple_alchemy_job.generation, + result=default_testing_image_bytes, + include_preloading=True, + include_generation=False, + include_post_processing=True, + include_safety_check=False, + ) + + +def test_alchemy_worker_job_lifecycle_preserves_dispatch_result_ids( + simple_alchemy_generation: AlchemySingleGeneration, + default_testing_image_bytes: bytes, +) -> None: + from .test_horde_single_generations import TestHordeSingleGeneration + + dispatch_ids = ("dispatch-alchemy-1", uuid4()) + job = AlchemyWorkerJob( + generation=simple_alchemy_generation, + dispatch_result_ids=dispatch_ids, + ) + + TestHordeSingleGeneration() + + TestHordeSingleGeneration.run_generation_process( + generation=job.generation, + result=default_testing_image_bytes, + include_preloading=True, + include_generation=False, + include_post_processing=True, + include_safety_check=False, + ) + + assert job.generation.dispatch_result_ids == list(dispatch_ids) + + +def test_alchemy_worker_job_lifecycle_uses_dispatch_job_id( + simple_alchemy_generation: AlchemySingleGeneration, + default_testing_image_bytes: bytes, +) -> None: + from .test_horde_single_generations import TestHordeSingleGeneration + + job = AlchemyWorkerJob( + generation=simple_alchemy_generation, + dispatch_job_id="dispatch-alchemy-job", + ) + + TestHordeSingleGeneration() + + TestHordeSingleGeneration.run_generation_process( + generation=job.generation, + result=default_testing_image_bytes, + include_preloading=True, + include_generation=False, + include_post_processing=True, + include_safety_check=False, + ) + + assert job.generation.dispatch_result_ids == ["dispatch-alchemy-job"] + + +@pytest.fixture(scope="function") +def simple_text_job( + simple_text_generation: TextSingleGeneration, +) -> TextWorkerJob: + return TextWorkerJob( + generation=simple_text_generation, + ) + + +def test_init_text_job( + simple_text_job: TextWorkerJob, +) -> None: + assert simple_text_job is not None + assert simple_text_job.generation_cls is TextSingleGeneration + + +def test_init_text_worker_job_end_to_end_happy_path( + simple_text_job: TextWorkerJob, +) -> None: + from .test_horde_single_generations import TestHordeSingleGeneration + + TestHordeSingleGeneration() + + TestHordeSingleGeneration.run_generation_process( + generation=simple_text_job.generation, + result="Dummy result", + include_preloading=True, + include_generation=True, + include_post_processing=False, + include_safety_check=False, + ) + + +def test_text_worker_job_lifecycle_preserves_dispatch_result_ids( + simple_text_generation: TextSingleGeneration, +) -> None: + from .test_horde_single_generations import TestHordeSingleGeneration + + dispatch_ids = ("dispatch-text-1", uuid4()) + job = TextWorkerJob( + generation=simple_text_generation, + 
dispatch_result_ids=dispatch_ids, + ) + + TestHordeSingleGeneration() + + TestHordeSingleGeneration.run_generation_process( + generation=job.generation, + result="Dummy result", + include_preloading=True, + include_generation=True, + include_post_processing=False, + include_safety_check=False, + ) + + assert job.generation.dispatch_result_ids == list(dispatch_ids) + + +def test_text_worker_job_lifecycle_uses_dispatch_job_id( + simple_text_generation: TextSingleGeneration, +) -> None: + from .test_horde_single_generations import TestHordeSingleGeneration + + job = TextWorkerJob( + generation=simple_text_generation, + dispatch_job_id="dispatch-text-job", + ) + + TestHordeSingleGeneration() + + TestHordeSingleGeneration.run_generation_process( + generation=job.generation, + result="Dummy result", + include_preloading=True, + include_generation=True, + include_post_processing=False, + include_safety_check=False, + ) + + assert job.generation.dispatch_result_ids == ["dispatch-text-job"] + + +def test_image_worker_job_from_template_overrides_prompt() -> None: + template = ImageGenerationParametersTemplate( + base_params=BasicImageGenerationParametersTemplate(prompt="placeholder") + ) + job = ImageWorkerJob.from_template( + template, + base_param_updates=BasicImageGenerationParametersTemplate(prompt="generated", model="image-model"), + result_ids=("image-result",), + ) + + assert job.generation.generation_parameters.base_params.prompt == "generated" + assert job.generation.generation_parameters.base_params.model == "image-model" + assert job.generation.generation_parameters.result_ids == ["image-result"] + + +def test_text_worker_job_from_template_updates_prompt() -> None: + template = TextGenerationParametersTemplate( + base_params=BasicTextGenerationParametersTemplate( + prompt="base", + model="text-model", + ), + ) + job = TextWorkerJob.from_template( + template, + base_param_updates=BasicTextGenerationParametersTemplate(prompt="final"), + ) + + assert job.generation.generation_parameters.base_params.prompt == "final" + assert job.generation.generation_parameters.result_ids is not None + + +def test_text_worker_job_from_template_accepts_explicit_result_ids() -> None: + template = TextGenerationParametersTemplate( + base_params=BasicTextGenerationParametersTemplate( + prompt="seed", + model="text-model", + ), + ) + + job = TextWorkerJob.from_template( + template, + base_param_updates=BasicTextGenerationParametersTemplate(prompt="updated"), + result_ids=("text-template",), + ) + + result_ids = job.generation.generation_parameters.result_ids + assert result_ids == ["text-template"] + result_ids.append("mutated") + assert job.generation.generation_parameters.result_ids == result_ids + + +def test_text_worker_job_from_template_uses_allocator() -> None: + template = TextGenerationParametersTemplate( + base_params=BasicTextGenerationParametersTemplate( + prompt="allocator", + model="text-model", + ), + ) + allocator = ResultIdAllocator() + + first_job = TextWorkerJob.from_template(template, allocator=allocator, seed="text-seed") + second_job = TextWorkerJob.from_template(template, allocator=allocator, seed="text-seed") + + assert ( + first_job.generation.generation_parameters.result_ids == second_job.generation.generation_parameters.result_ids + ) + + +def test_alchemy_worker_job_from_template_sets_source_image() -> None: + template = UpscaleAlchemyParametersTemplate() + job = AlchemyWorkerJob.from_template( + template, + source_image=b"image-bytes", + default_form=KNOWN_ALCHEMY_FORMS.post_process, + 
) + + assert job.generation.generation_parameters.source_image == b"image-bytes" + assert job.generation.generation_parameters.form == KNOWN_ALCHEMY_FORMS.post_process + + +def test_alchemy_worker_job_from_template_allocates_result_id_with_allocator() -> None: + template = UpscaleAlchemyParametersTemplate() + allocator = ResultIdAllocator() + + first_job = AlchemyWorkerJob.from_template( + template, + source_image=b"seed-image", + allocator=allocator, + seed="alchemy-seed", + ) + second_job = AlchemyWorkerJob.from_template( + template, + source_image=b"seed-image", + allocator=allocator, + seed="alchemy-seed", + ) + + first_id = first_job.generation.generation_parameters.result_id + assert first_id + assert first_job.generation.result_ids == [first_id] + assert first_id == second_job.generation.generation_parameters.result_id + + +def test_worker_job_from_template_preserves_generation_identifier_when_requested() -> None: + template = ImageGenerationParametersTemplate( + base_params=BasicImageGenerationParametersTemplate(prompt="placeholder") + ) + job = ImageWorkerJob.from_template( + template, + generation_id="generation-id", + job_id="job-id", + preserve_generation_id=True, + base_param_updates=BasicImageGenerationParametersTemplate(prompt="generated", model="image-model"), + ) + + assert job.job_id == "job-id" + assert job.generation.generation_id == "generation-id" + + +def test_worker_job_from_template_binds_generation_identifier_by_default() -> None: + template = ImageGenerationParametersTemplate( + base_params=BasicImageGenerationParametersTemplate(prompt="placeholder", model="image-model") + ) + job = ImageWorkerJob.from_template( + template, + job_id="job-id", + ) + + assert job.job_id == "job-id" + assert job.generation.generation_id == "job-id" + + +def test_worker_job_propagates_dispatch_result_ids( + simple_image_generation: ImageSingleGeneration, +) -> None: + dispatch_ids = ("result-1", uuid4()) + + job = ImageWorkerJob( + generation=simple_image_generation, + dispatch_result_ids=dispatch_ids, + ) + + # Generation receives the dispatch result identifiers and exposes copies on access. + assert job.generation.dispatch_result_ids == list(dispatch_ids) + + observed_ids = job.generation.dispatch_result_ids + assert observed_ids is not None + observed_ids.append("mutated") + + # Mutating the returned collection should not affect the internal dispatch bindings. 
+ assert job.generation.dispatch_result_ids == list(dispatch_ids) + + +def test_worker_job_defaults_dispatch_result_ids_to_dispatch_job_id( + simple_image_generation: ImageSingleGeneration, +) -> None: + job = ImageWorkerJob( + generation=simple_image_generation, + dispatch_job_id="remote-job", + ) + + assert job.generation.dispatch_result_ids == ["remote-job"] + + +def test_text_worker_job_dispatch_result_ids( + simple_text_generation: TextSingleGeneration, +) -> None: + dispatch_ids = (uuid4(), "result-2") + + job = TextWorkerJob( + generation=simple_text_generation, + dispatch_result_ids=dispatch_ids, + ) + + assert job.generation.dispatch_result_ids == list(dispatch_ids) + + observed_ids = job.generation.dispatch_result_ids + assert observed_ids is not None + observed_ids.append("mutated") + + assert job.generation.dispatch_result_ids == list(dispatch_ids) + + +def test_text_worker_job_defaults_dispatch_result_ids_to_dispatch_job_id( + simple_text_generation: TextSingleGeneration, +) -> None: + job = TextWorkerJob( + generation=simple_text_generation, + dispatch_job_id="remote-text-job", + ) + + assert job.generation.dispatch_result_ids == ["remote-text-job"] + + +def test_image_worker_job_from_template_uses_allocator_for_result_ids() -> None: + template = ImageGenerationParametersTemplate( + base_params=BasicImageGenerationParametersTemplate(prompt="allocator prompt", model="image-model") + ) + template.batch_size = 2 + allocator = ResultIdAllocator() + + first_job = ImageWorkerJob.from_template(template, allocator=allocator, seed="image-seed") + second_job = ImageWorkerJob.from_template(template, allocator=allocator, seed="image-seed") + + first_ids = first_job.generation.generation_parameters.result_ids + assert len(first_ids) == 2 + assert first_ids == first_job.generation.result_ids + assert first_ids == second_job.generation.generation_parameters.result_ids + + +def test_worker_job_preserves_existing_dispatch_result_ids( + simple_image_generation: ImageSingleGeneration, +) -> None: + existing = ["pre-announced"] + simple_image_generation.set_dispatch_result_ids(existing) + + job = ImageWorkerJob( + generation=simple_image_generation, + dispatch_job_id="remote-job", + ) + + assert job.generation.dispatch_result_ids == existing + + +def test_alchemy_worker_job_dispatch_result_ids( + simple_alchemy_generation: AlchemySingleGeneration, +) -> None: + dispatch_ids = ("result-1", uuid4()) + + job = AlchemyWorkerJob( + generation=simple_alchemy_generation, + dispatch_result_ids=dispatch_ids, + ) + + assert job.generation.dispatch_result_ids == list(dispatch_ids) + + observed_ids = job.generation.dispatch_result_ids + assert observed_ids is not None + observed_ids.append("mutated") + + assert job.generation.dispatch_result_ids == list(dispatch_ids) + + +def test_alchemy_worker_job_defaults_dispatch_result_ids_to_dispatch_job_id( + simple_alchemy_generation: AlchemySingleGeneration, +) -> None: + job = AlchemyWorkerJob( + generation=simple_alchemy_generation, + dispatch_job_id="remote-alchemy-job", + ) + + assert job.generation.dispatch_result_ids == ["remote-alchemy-job"] diff --git a/uv.lock b/uv.lock new file mode 100644 index 00000000..4d738b6a --- /dev/null +++ b/uv.lock @@ -0,0 +1,3113 @@ +version = 1 +revision = 3 +requires-python = ">=3.12" +resolution-markers = [ + "python_full_version >= '3.14'", + "python_full_version < '3.14'", +] + +[[package]] +name = "aiodns" +version = "3.6.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycares" }, +] 
+sdist = { url = "https://files.pythonhosted.org/packages/85/2f/9d1ee4f937addda60220f47925dac6c6b3782f6851fd578987284a8d2491/aiodns-3.6.1.tar.gz", hash = "sha256:b0e9ce98718a5b8f7ca8cd16fc393163374bc2412236b91f6c851d066e3324b6", size = 15143, upload-time = "2025-12-11T12:53:07.785Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/09/e3/9f777774ebe8f664bcd564f9de3936490a16effa82a969372161c9b0fb21/aiodns-3.6.1-py3-none-any.whl", hash = "sha256:46233ccad25f2037903828c5d05b64590eaa756e51d12b4a5616e2defcbc98c7", size = 7975, upload-time = "2025-12-11T12:53:06.387Z" }, +] + +[[package]] +name = "aiofiles" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/41/c3/534eac40372d8ee36ef40df62ec129bee4fdb5ad9706e58a29be53b2c970/aiofiles-25.1.0.tar.gz", hash = "sha256:a8d728f0a29de45dc521f18f07297428d56992a742f0cd2701ba86e44d23d5b2", size = 46354, upload-time = "2025-10-09T20:51:04.358Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/8a/340a1555ae33d7354dbca4faa54948d76d89a27ceef032c8c3bc661d003e/aiofiles-25.1.0-py3-none-any.whl", hash = "sha256:abe311e527c862958650f9438e859c1fa7568a141b22abcd015e120e86a85695", size = 14668, upload-time = "2025-10-09T20:51:03.174Z" }, +] + +[[package]] +name = "aiohappyeyeballs" +version = "2.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760, upload-time = "2025-03-12T01:42:48.764Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265, upload-time = "2025-03-12T01:42:47.083Z" }, +] + +[[package]] +name = "aiohttp" +version = "3.13.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohappyeyeballs" }, + { name = "aiosignal" }, + { name = "attrs" }, + { name = "frozenlist" }, + { name = "multidict" }, + { name = "propcache" }, + { name = "yarl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1c/ce/3b83ebba6b3207a7135e5fcaba49706f8a4b6008153b4e30540c982fae26/aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca", size = 7837994, upload-time = "2025-10-28T20:59:39.937Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/29/9b/01f00e9856d0a73260e86dd8ed0c2234a466c5c1712ce1c281548df39777/aiohttp-3.13.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b1e56bab2e12b2b9ed300218c351ee2a3d8c8fdab5b1ec6193e11a817767e47b", size = 737623, upload-time = "2025-10-28T20:56:30.797Z" }, + { url = "https://files.pythonhosted.org/packages/5a/1b/4be39c445e2b2bd0aab4ba736deb649fabf14f6757f405f0c9685019b9e9/aiohttp-3.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:364e25edaabd3d37b1db1f0cbcee8c73c9a3727bfa262b83e5e4cf3489a2a9dc", size = 492664, upload-time = "2025-10-28T20:56:32.708Z" }, + { url = "https://files.pythonhosted.org/packages/28/66/d35dcfea8050e131cdd731dff36434390479b4045a8d0b9d7111b0a968f1/aiohttp-3.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c5c94825f744694c4b8db20b71dba9a257cd2ba8e010a803042123f3a25d50d7", size = 491808, upload-time = 
"2025-10-28T20:56:34.57Z" }, + { url = "https://files.pythonhosted.org/packages/00/29/8e4609b93e10a853b65f8291e64985de66d4f5848c5637cddc70e98f01f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba2715d842ffa787be87cbfce150d5e88c87a98e0b62e0f5aa489169a393dbbb", size = 1738863, upload-time = "2025-10-28T20:56:36.377Z" }, + { url = "https://files.pythonhosted.org/packages/9d/fa/4ebdf4adcc0def75ced1a0d2d227577cd7b1b85beb7edad85fcc87693c75/aiohttp-3.13.2-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:585542825c4bc662221fb257889e011a5aa00f1ae4d75d1d246a5225289183e3", size = 1700586, upload-time = "2025-10-28T20:56:38.034Z" }, + { url = "https://files.pythonhosted.org/packages/da/04/73f5f02ff348a3558763ff6abe99c223381b0bace05cd4530a0258e52597/aiohttp-3.13.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:39d02cb6025fe1aabca329c5632f48c9532a3dabccd859e7e2f110668972331f", size = 1768625, upload-time = "2025-10-28T20:56:39.75Z" }, + { url = "https://files.pythonhosted.org/packages/f8/49/a825b79ffec124317265ca7d2344a86bcffeb960743487cb11988ffb3494/aiohttp-3.13.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e67446b19e014d37342f7195f592a2a948141d15a312fe0e700c2fd2f03124f6", size = 1867281, upload-time = "2025-10-28T20:56:41.471Z" }, + { url = "https://files.pythonhosted.org/packages/b9/48/adf56e05f81eac31edcfae45c90928f4ad50ef2e3ea72cb8376162a368f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4356474ad6333e41ccefd39eae869ba15a6c5299c9c01dfdcfdd5c107be4363e", size = 1752431, upload-time = "2025-10-28T20:56:43.162Z" }, + { url = "https://files.pythonhosted.org/packages/30/ab/593855356eead019a74e862f21523db09c27f12fd24af72dbc3555b9bfd9/aiohttp-3.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eeacf451c99b4525f700f078becff32c32ec327b10dcf31306a8a52d78166de7", size = 1562846, upload-time = "2025-10-28T20:56:44.85Z" }, + { url = "https://files.pythonhosted.org/packages/39/0f/9f3d32271aa8dc35036e9668e31870a9d3b9542dd6b3e2c8a30931cb27ae/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8a9b889aeabd7a4e9af0b7f4ab5ad94d42e7ff679aaec6d0db21e3b639ad58d", size = 1699606, upload-time = "2025-10-28T20:56:46.519Z" }, + { url = "https://files.pythonhosted.org/packages/2c/3c/52d2658c5699b6ef7692a3f7128b2d2d4d9775f2a68093f74bca06cf01e1/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa89cb11bc71a63b69568d5b8a25c3ca25b6d54c15f907ca1c130d72f320b76b", size = 1720663, upload-time = "2025-10-28T20:56:48.528Z" }, + { url = "https://files.pythonhosted.org/packages/9b/d4/8f8f3ff1fb7fb9e3f04fcad4e89d8a1cd8fc7d05de67e3de5b15b33008ff/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8aa7c807df234f693fed0ecd507192fc97692e61fee5702cdc11155d2e5cadc8", size = 1737939, upload-time = "2025-10-28T20:56:50.77Z" }, + { url = "https://files.pythonhosted.org/packages/03/d3/ddd348f8a27a634daae39a1b8e291ff19c77867af438af844bf8b7e3231b/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9eb3e33fdbe43f88c3c75fa608c25e7c47bbd80f48d012763cb67c47f39a7e16", size = 1555132, upload-time = "2025-10-28T20:56:52.568Z" }, + { url = 
"https://files.pythonhosted.org/packages/39/b8/46790692dc46218406f94374903ba47552f2f9f90dad554eed61bfb7b64c/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9434bc0d80076138ea986833156c5a48c9c7a8abb0c96039ddbb4afc93184169", size = 1764802, upload-time = "2025-10-28T20:56:54.292Z" }, + { url = "https://files.pythonhosted.org/packages/ba/e4/19ce547b58ab2a385e5f0b8aa3db38674785085abcf79b6e0edd1632b12f/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff15c147b2ad66da1f2cbb0622313f2242d8e6e8f9b79b5206c84523a4473248", size = 1719512, upload-time = "2025-10-28T20:56:56.428Z" }, + { url = "https://files.pythonhosted.org/packages/70/30/6355a737fed29dcb6dfdd48682d5790cb5eab050f7b4e01f49b121d3acad/aiohttp-3.13.2-cp312-cp312-win32.whl", hash = "sha256:27e569eb9d9e95dbd55c0fc3ec3a9335defbf1d8bc1d20171a49f3c4c607b93e", size = 426690, upload-time = "2025-10-28T20:56:58.736Z" }, + { url = "https://files.pythonhosted.org/packages/0a/0d/b10ac09069973d112de6ef980c1f6bb31cb7dcd0bc363acbdad58f927873/aiohttp-3.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:8709a0f05d59a71f33fd05c17fc11fcb8c30140506e13c2f5e8ee1b8964e1b45", size = 453465, upload-time = "2025-10-28T20:57:00.795Z" }, + { url = "https://files.pythonhosted.org/packages/bf/78/7e90ca79e5aa39f9694dcfd74f4720782d3c6828113bb1f3197f7e7c4a56/aiohttp-3.13.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7519bdc7dfc1940d201651b52bf5e03f5503bda45ad6eacf64dda98be5b2b6be", size = 732139, upload-time = "2025-10-28T20:57:02.455Z" }, + { url = "https://files.pythonhosted.org/packages/db/ed/1f59215ab6853fbaa5c8495fa6cbc39edfc93553426152b75d82a5f32b76/aiohttp-3.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:088912a78b4d4f547a1f19c099d5a506df17eacec3c6f4375e2831ec1d995742", size = 490082, upload-time = "2025-10-28T20:57:04.784Z" }, + { url = "https://files.pythonhosted.org/packages/68/7b/fe0fe0f5e05e13629d893c760465173a15ad0039c0a5b0d0040995c8075e/aiohttp-3.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5276807b9de9092af38ed23ce120539ab0ac955547b38563a9ba4f5b07b95293", size = 489035, upload-time = "2025-10-28T20:57:06.894Z" }, + { url = "https://files.pythonhosted.org/packages/d2/04/db5279e38471b7ac801d7d36a57d1230feeee130bbe2a74f72731b23c2b1/aiohttp-3.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1237c1375eaef0db4dcd7c2559f42e8af7b87ea7d295b118c60c36a6e61cb811", size = 1720387, upload-time = "2025-10-28T20:57:08.685Z" }, + { url = "https://files.pythonhosted.org/packages/31/07/8ea4326bd7dae2bd59828f69d7fdc6e04523caa55e4a70f4a8725a7e4ed2/aiohttp-3.13.2-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:96581619c57419c3d7d78703d5b78c1e5e5fc0172d60f555bdebaced82ded19a", size = 1688314, upload-time = "2025-10-28T20:57:10.693Z" }, + { url = "https://files.pythonhosted.org/packages/48/ab/3d98007b5b87ffd519d065225438cc3b668b2f245572a8cb53da5dd2b1bc/aiohttp-3.13.2-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a2713a95b47374169409d18103366de1050fe0ea73db358fc7a7acb2880422d4", size = 1756317, upload-time = "2025-10-28T20:57:12.563Z" }, + { url = "https://files.pythonhosted.org/packages/97/3d/801ca172b3d857fafb7b50c7c03f91b72b867a13abca982ed6b3081774ef/aiohttp-3.13.2-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:228a1cd556b3caca590e9511a89444925da87d35219a49ab5da0c36d2d943a6a", size = 1858539, 
upload-time = "2025-10-28T20:57:14.623Z" }, + { url = "https://files.pythonhosted.org/packages/f7/0d/4764669bdf47bd472899b3d3db91fffbe925c8e3038ec591a2fd2ad6a14d/aiohttp-3.13.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ac6cde5fba8d7d8c6ac963dbb0256a9854e9fafff52fbcc58fdf819357892c3e", size = 1739597, upload-time = "2025-10-28T20:57:16.399Z" }, + { url = "https://files.pythonhosted.org/packages/c4/52/7bd3c6693da58ba16e657eb904a5b6decfc48ecd06e9ac098591653b1566/aiohttp-3.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f2bef8237544f4e42878c61cef4e2839fee6346dc60f5739f876a9c50be7fcdb", size = 1555006, upload-time = "2025-10-28T20:57:18.288Z" }, + { url = "https://files.pythonhosted.org/packages/48/30/9586667acec5993b6f41d2ebcf96e97a1255a85f62f3c653110a5de4d346/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:16f15a4eac3bc2d76c45f7ebdd48a65d41b242eb6c31c2245463b40b34584ded", size = 1683220, upload-time = "2025-10-28T20:57:20.241Z" }, + { url = "https://files.pythonhosted.org/packages/71/01/3afe4c96854cfd7b30d78333852e8e851dceaec1c40fd00fec90c6402dd2/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:bb7fb776645af5cc58ab804c58d7eba545a97e047254a52ce89c157b5af6cd0b", size = 1712570, upload-time = "2025-10-28T20:57:22.253Z" }, + { url = "https://files.pythonhosted.org/packages/11/2c/22799d8e720f4697a9e66fd9c02479e40a49de3de2f0bbe7f9f78a987808/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e1b4951125ec10c70802f2cb09736c895861cd39fd9dcb35107b4dc8ae6220b8", size = 1733407, upload-time = "2025-10-28T20:57:24.37Z" }, + { url = "https://files.pythonhosted.org/packages/34/cb/90f15dd029f07cebbd91f8238a8b363978b530cd128488085b5703683594/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:550bf765101ae721ee1d37d8095f47b1f220650f85fe1af37a90ce75bab89d04", size = 1550093, upload-time = "2025-10-28T20:57:26.257Z" }, + { url = "https://files.pythonhosted.org/packages/69/46/12dce9be9d3303ecbf4d30ad45a7683dc63d90733c2d9fe512be6716cd40/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fe91b87fc295973096251e2d25a811388e7d8adf3bd2b97ef6ae78bc4ac6c476", size = 1758084, upload-time = "2025-10-28T20:57:28.349Z" }, + { url = "https://files.pythonhosted.org/packages/f9/c8/0932b558da0c302ffd639fc6362a313b98fdf235dc417bc2493da8394df7/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e0c8e31cfcc4592cb200160344b2fb6ae0f9e4effe06c644b5a125d4ae5ebe23", size = 1716987, upload-time = "2025-10-28T20:57:30.233Z" }, + { url = "https://files.pythonhosted.org/packages/5d/8b/f5bd1a75003daed099baec373aed678f2e9b34f2ad40d85baa1368556396/aiohttp-3.13.2-cp313-cp313-win32.whl", hash = "sha256:0740f31a60848d6edb296a0df827473eede90c689b8f9f2a4cdde74889eb2254", size = 425859, upload-time = "2025-10-28T20:57:32.105Z" }, + { url = "https://files.pythonhosted.org/packages/5d/28/a8a9fc6957b2cee8902414e41816b5ab5536ecf43c3b1843c10e82c559b2/aiohttp-3.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:a88d13e7ca367394908f8a276b89d04a3652044612b9a408a0bb22a5ed976a1a", size = 452192, upload-time = "2025-10-28T20:57:34.166Z" }, + { url = "https://files.pythonhosted.org/packages/9b/36/e2abae1bd815f01c957cbf7be817b3043304e1c87bad526292a0410fdcf9/aiohttp-3.13.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:2475391c29230e063ef53a66669b7b691c9bfc3f1426a0f7bcdf1216bdbac38b", size = 735234, upload-time = "2025-10-28T20:57:36.415Z" }, + { url = 
"https://files.pythonhosted.org/packages/ca/e3/1ee62dde9b335e4ed41db6bba02613295a0d5b41f74a783c142745a12763/aiohttp-3.13.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f33c8748abef4d8717bb20e8fb1b3e07c6adacb7fd6beaae971a764cf5f30d61", size = 490733, upload-time = "2025-10-28T20:57:38.205Z" }, + { url = "https://files.pythonhosted.org/packages/1a/aa/7a451b1d6a04e8d15a362af3e9b897de71d86feac3babf8894545d08d537/aiohttp-3.13.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ae32f24bbfb7dbb485a24b30b1149e2f200be94777232aeadba3eecece4d0aa4", size = 491303, upload-time = "2025-10-28T20:57:40.122Z" }, + { url = "https://files.pythonhosted.org/packages/57/1e/209958dbb9b01174870f6a7538cd1f3f28274fdbc88a750c238e2c456295/aiohttp-3.13.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d7f02042c1f009ffb70067326ef183a047425bb2ff3bc434ead4dd4a4a66a2b", size = 1717965, upload-time = "2025-10-28T20:57:42.28Z" }, + { url = "https://files.pythonhosted.org/packages/08/aa/6a01848d6432f241416bc4866cae8dc03f05a5a884d2311280f6a09c73d6/aiohttp-3.13.2-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:93655083005d71cd6c072cdab54c886e6570ad2c4592139c3fb967bfc19e4694", size = 1667221, upload-time = "2025-10-28T20:57:44.869Z" }, + { url = "https://files.pythonhosted.org/packages/87/4f/36c1992432d31bbc789fa0b93c768d2e9047ec8c7177e5cd84ea85155f36/aiohttp-3.13.2-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0db1e24b852f5f664cd728db140cf11ea0e82450471232a394b3d1a540b0f906", size = 1757178, upload-time = "2025-10-28T20:57:47.216Z" }, + { url = "https://files.pythonhosted.org/packages/ac/b4/8e940dfb03b7e0f68a82b88fd182b9be0a65cb3f35612fe38c038c3112cf/aiohttp-3.13.2-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b009194665bcd128e23eaddef362e745601afa4641930848af4c8559e88f18f9", size = 1838001, upload-time = "2025-10-28T20:57:49.337Z" }, + { url = "https://files.pythonhosted.org/packages/d7/ef/39f3448795499c440ab66084a9db7d20ca7662e94305f175a80f5b7e0072/aiohttp-3.13.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c038a8fdc8103cd51dbd986ecdce141473ffd9775a7a8057a6ed9c3653478011", size = 1716325, upload-time = "2025-10-28T20:57:51.327Z" }, + { url = "https://files.pythonhosted.org/packages/d7/51/b311500ffc860b181c05d91c59a1313bdd05c82960fdd4035a15740d431e/aiohttp-3.13.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:66bac29b95a00db411cd758fea0e4b9bdba6d549dfe333f9a945430f5f2cc5a6", size = 1547978, upload-time = "2025-10-28T20:57:53.554Z" }, + { url = "https://files.pythonhosted.org/packages/31/64/b9d733296ef79815226dab8c586ff9e3df41c6aff2e16c06697b2d2e6775/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4ebf9cfc9ba24a74cf0718f04aac2a3bbe745902cc7c5ebc55c0f3b5777ef213", size = 1682042, upload-time = "2025-10-28T20:57:55.617Z" }, + { url = "https://files.pythonhosted.org/packages/3f/30/43d3e0f9d6473a6db7d472104c4eff4417b1e9df01774cb930338806d36b/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:a4b88ebe35ce54205c7074f7302bd08a4cb83256a3e0870c72d6f68a3aaf8e49", size = 1680085, upload-time = "2025-10-28T20:57:57.59Z" }, + { url = "https://files.pythonhosted.org/packages/16/51/c709f352c911b1864cfd1087577760ced64b3e5bee2aa88b8c0c8e2e4972/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = 
"sha256:98c4fb90bb82b70a4ed79ca35f656f4281885be076f3f970ce315402b53099ae", size = 1728238, upload-time = "2025-10-28T20:57:59.525Z" }, + { url = "https://files.pythonhosted.org/packages/19/e2/19bd4c547092b773caeb48ff5ae4b1ae86756a0ee76c16727fcfd281404b/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:ec7534e63ae0f3759df3a1ed4fa6bc8f75082a924b590619c0dd2f76d7043caa", size = 1544395, upload-time = "2025-10-28T20:58:01.914Z" }, + { url = "https://files.pythonhosted.org/packages/cf/87/860f2803b27dfc5ed7be532832a3498e4919da61299b4a1f8eb89b8ff44d/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5b927cf9b935a13e33644cbed6c8c4b2d0f25b713d838743f8fe7191b33829c4", size = 1742965, upload-time = "2025-10-28T20:58:03.972Z" }, + { url = "https://files.pythonhosted.org/packages/67/7f/db2fc7618925e8c7a601094d5cbe539f732df4fb570740be88ed9e40e99a/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:88d6c017966a78c5265d996c19cdb79235be5e6412268d7e2ce7dee339471b7a", size = 1697585, upload-time = "2025-10-28T20:58:06.189Z" }, + { url = "https://files.pythonhosted.org/packages/0c/07/9127916cb09bb38284db5036036042b7b2c514c8ebaeee79da550c43a6d6/aiohttp-3.13.2-cp314-cp314-win32.whl", hash = "sha256:f7c183e786e299b5d6c49fb43a769f8eb8e04a2726a2bd5887b98b5cc2d67940", size = 431621, upload-time = "2025-10-28T20:58:08.636Z" }, + { url = "https://files.pythonhosted.org/packages/fb/41/554a8a380df6d3a2bba8a7726429a23f4ac62aaf38de43bb6d6cde7b4d4d/aiohttp-3.13.2-cp314-cp314-win_amd64.whl", hash = "sha256:fe242cd381e0fb65758faf5ad96c2e460df6ee5b2de1072fe97e4127927e00b4", size = 457627, upload-time = "2025-10-28T20:58:11Z" }, + { url = "https://files.pythonhosted.org/packages/c7/8e/3824ef98c039d3951cb65b9205a96dd2b20f22241ee17d89c5701557c826/aiohttp-3.13.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:f10d9c0b0188fe85398c61147bbd2a657d616c876863bfeff43376e0e3134673", size = 767360, upload-time = "2025-10-28T20:58:13.358Z" }, + { url = "https://files.pythonhosted.org/packages/a4/0f/6a03e3fc7595421274fa34122c973bde2d89344f8a881b728fa8c774e4f1/aiohttp-3.13.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e7c952aefdf2460f4ae55c5e9c3e80aa72f706a6317e06020f80e96253b1accd", size = 504616, upload-time = "2025-10-28T20:58:15.339Z" }, + { url = "https://files.pythonhosted.org/packages/c6/aa/ed341b670f1bc8a6f2c6a718353d13b9546e2cef3544f573c6a1ff0da711/aiohttp-3.13.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c20423ce14771d98353d2e25e83591fa75dfa90a3c1848f3d7c68243b4fbded3", size = 509131, upload-time = "2025-10-28T20:58:17.693Z" }, + { url = "https://files.pythonhosted.org/packages/7f/f0/c68dac234189dae5c4bbccc0f96ce0cc16b76632cfc3a08fff180045cfa4/aiohttp-3.13.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e96eb1a34396e9430c19d8338d2ec33015e4a87ef2b4449db94c22412e25ccdf", size = 1864168, upload-time = "2025-10-28T20:58:20.113Z" }, + { url = "https://files.pythonhosted.org/packages/8f/65/75a9a76db8364b5d0e52a0c20eabc5d52297385d9af9c35335b924fafdee/aiohttp-3.13.2-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:23fb0783bc1a33640036465019d3bba069942616a6a2353c6907d7fe1ccdaf4e", size = 1719200, upload-time = "2025-10-28T20:58:22.583Z" }, + { url = "https://files.pythonhosted.org/packages/f5/55/8df2ed78d7f41d232f6bd3ff866b6f617026551aa1d07e2f03458f964575/aiohttp-3.13.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", 
hash = "sha256:2e1a9bea6244a1d05a4e57c295d69e159a5c50d8ef16aa390948ee873478d9a5", size = 1843497, upload-time = "2025-10-28T20:58:24.672Z" }, + { url = "https://files.pythonhosted.org/packages/e9/e0/94d7215e405c5a02ccb6a35c7a3a6cfff242f457a00196496935f700cde5/aiohttp-3.13.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0a3d54e822688b56e9f6b5816fb3de3a3a64660efac64e4c2dc435230ad23bad", size = 1935703, upload-time = "2025-10-28T20:58:26.758Z" }, + { url = "https://files.pythonhosted.org/packages/0b/78/1eeb63c3f9b2d1015a4c02788fb543141aad0a03ae3f7a7b669b2483f8d4/aiohttp-3.13.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7a653d872afe9f33497215745da7a943d1dc15b728a9c8da1c3ac423af35178e", size = 1792738, upload-time = "2025-10-28T20:58:29.787Z" }, + { url = "https://files.pythonhosted.org/packages/41/75/aaf1eea4c188e51538c04cc568040e3082db263a57086ea74a7d38c39e42/aiohttp-3.13.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:56d36e80d2003fa3fc0207fac644216d8532e9504a785ef9a8fd013f84a42c61", size = 1624061, upload-time = "2025-10-28T20:58:32.529Z" }, + { url = "https://files.pythonhosted.org/packages/9b/c2/3b6034de81fbcc43de8aeb209073a2286dfb50b86e927b4efd81cf848197/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:78cd586d8331fb8e241c2dd6b2f4061778cc69e150514b39a9e28dd050475661", size = 1789201, upload-time = "2025-10-28T20:58:34.618Z" }, + { url = "https://files.pythonhosted.org/packages/c9/38/c15dcf6d4d890217dae79d7213988f4e5fe6183d43893a9cf2fe9e84ca8d/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:20b10bbfbff766294fe99987f7bb3b74fdd2f1a2905f2562132641ad434dcf98", size = 1776868, upload-time = "2025-10-28T20:58:38.835Z" }, + { url = "https://files.pythonhosted.org/packages/04/75/f74fd178ac81adf4f283a74847807ade5150e48feda6aef024403716c30c/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9ec49dff7e2b3c85cdeaa412e9d438f0ecd71676fde61ec57027dd392f00c693", size = 1790660, upload-time = "2025-10-28T20:58:41.507Z" }, + { url = "https://files.pythonhosted.org/packages/e7/80/7368bd0d06b16b3aba358c16b919e9c46cf11587dc572091031b0e9e3ef0/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:94f05348c4406450f9d73d38efb41d669ad6cd90c7ee194810d0eefbfa875a7a", size = 1617548, upload-time = "2025-10-28T20:58:43.674Z" }, + { url = "https://files.pythonhosted.org/packages/7d/4b/a6212790c50483cb3212e507378fbe26b5086d73941e1ec4b56a30439688/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:fa4dcb605c6f82a80c7f95713c2b11c3b8e9893b3ebd2bc9bde93165ed6107be", size = 1817240, upload-time = "2025-10-28T20:58:45.787Z" }, + { url = "https://files.pythonhosted.org/packages/ff/f7/ba5f0ba4ea8d8f3c32850912944532b933acbf0f3a75546b89269b9b7dde/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cf00e5db968c3f67eccd2778574cf64d8b27d95b237770aa32400bd7a1ca4f6c", size = 1762334, upload-time = "2025-10-28T20:58:47.936Z" }, + { url = "https://files.pythonhosted.org/packages/7e/83/1a5a1856574588b1cad63609ea9ad75b32a8353ac995d830bf5da9357364/aiohttp-3.13.2-cp314-cp314t-win32.whl", hash = "sha256:d23b5fe492b0805a50d3371e8a728a9134d8de5447dce4c885f5587294750734", size = 464685, upload-time = "2025-10-28T20:58:50.642Z" }, + { url = "https://files.pythonhosted.org/packages/9f/4d/d22668674122c08f4d56972297c51a624e64b3ed1efaa40187607a7cb66e/aiohttp-3.13.2-cp314-cp314t-win_amd64.whl", hash 
= "sha256:ff0a7b0a82a7ab905cbda74006318d1b12e37c797eb1b0d4eb3e316cf47f658f", size = 498093, upload-time = "2025-10-28T20:58:52.782Z" }, +] + +[[package]] +name = "aiosignal" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "frozenlist" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" }, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" }, +] + +[[package]] +name = "argcomplete" +version = "3.6.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/38/61/0b9ae6399dd4a58d8c1b1dc5a27d6f2808023d0b5dd3104bb99f45a33ff6/argcomplete-3.6.3.tar.gz", hash = "sha256:62e8ed4fd6a45864acc8235409461b72c9a28ee785a2011cc5eb78318786c89c", size = 73754, upload-time = "2025-10-20T03:33:34.741Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/74/f5/9373290775639cb67a2fce7f629a1c240dce9f12fe927bc32b2736e16dfc/argcomplete-3.6.3-py3-none-any.whl", hash = "sha256:f5007b3a600ccac5d25bbce33089211dfd49eab4a7718da3f10e3082525a92ce", size = 43846, upload-time = "2025-10-20T03:33:33.021Z" }, +] + +[[package]] +name = "attrs" +version = "25.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/685e6633917e101e5dcb62b9dd76946cbb57c26e133bae9e0cd36033c0a9/attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", size = 934251, 
upload-time = "2025-10-06T13:54:44.725Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, +] + +[[package]] +name = "babel" +version = "2.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/6b/d52e42361e1aa00709585ecc30b3f9684b3ab62530771402248b1b1d6240/babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d", size = 9951852, upload-time = "2025-02-01T15:17:41.026Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537, upload-time = "2025-02-01T15:17:37.39Z" }, +] + +[[package]] +name = "backrefs" +version = "6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/86/e3/bb3a439d5cb255c4774724810ad8073830fac9c9dee123555820c1bcc806/backrefs-6.1.tar.gz", hash = "sha256:3bba1749aafe1db9b915f00e0dd166cba613b6f788ffd63060ac3485dc9be231", size = 7011962, upload-time = "2025-11-15T14:52:08.323Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/ee/c216d52f58ea75b5e1841022bbae24438b19834a29b163cb32aa3a2a7c6e/backrefs-6.1-py310-none-any.whl", hash = "sha256:2a2ccb96302337ce61ee4717ceacfbf26ba4efb1d55af86564b8bbaeda39cac1", size = 381059, upload-time = "2025-11-15T14:51:59.758Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9a/8da246d988ded941da96c7ed945d63e94a445637eaad985a0ed88787cb89/backrefs-6.1-py311-none-any.whl", hash = "sha256:e82bba3875ee4430f4de4b6db19429a27275d95a5f3773c57e9e18abc23fd2b7", size = 392854, upload-time = "2025-11-15T14:52:01.194Z" }, + { url = "https://files.pythonhosted.org/packages/37/c9/fd117a6f9300c62bbc33bc337fd2b3c6bfe28b6e9701de336b52d7a797ad/backrefs-6.1-py312-none-any.whl", hash = "sha256:c64698c8d2269343d88947c0735cb4b78745bd3ba590e10313fbf3f78c34da5a", size = 398770, upload-time = "2025-11-15T14:52:02.584Z" }, + { url = "https://files.pythonhosted.org/packages/eb/95/7118e935b0b0bd3f94dfec2d852fd4e4f4f9757bdb49850519acd245cd3a/backrefs-6.1-py313-none-any.whl", hash = "sha256:4c9d3dc1e2e558965202c012304f33d4e0e477e1c103663fd2c3cc9bb18b0d05", size = 400726, upload-time = "2025-11-15T14:52:04.093Z" }, + { url = "https://files.pythonhosted.org/packages/1d/72/6296bad135bfafd3254ae3648cd152980a424bd6fed64a101af00cc7ba31/backrefs-6.1-py314-none-any.whl", hash = "sha256:13eafbc9ccd5222e9c1f0bec563e6d2a6d21514962f11e7fc79872fd56cbc853", size = 412584, upload-time = "2025-11-15T14:52:05.233Z" }, + { url = "https://files.pythonhosted.org/packages/02/e3/a4fa1946722c4c7b063cc25043a12d9ce9b4323777f89643be74cef2993c/backrefs-6.1-py39-none-any.whl", hash = "sha256:a9e99b8a4867852cad177a6430e31b0f6e495d65f8c6c134b68c14c3c95bf4b0", size = 381058, upload-time = "2025-11-15T14:52:06.698Z" }, +] + +[[package]] +name = "beautifulsoup4" +version = "4.14.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c3/b0/1c6a16426d389813b48d95e26898aff79abbde42ad353958ad95cc8c9b21/beautifulsoup4-4.14.3.tar.gz", 
hash = "sha256:6292b1c5186d356bba669ef9f7f051757099565ad9ada5dd630bd9de5fa7fb86", size = 627737, upload-time = "2025-11-30T15:08:26.084Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1a/39/47f9197bdd44df24d67ac8893641e16f386c984a0619ef2ee4c51fbbc019/beautifulsoup4-4.14.3-py3-none-any.whl", hash = "sha256:0918bfe44902e6ad8d57732ba310582e98da931428d231a5ecb9e7c703a735bb", size = 107721, upload-time = "2025-11-30T15:08:24.087Z" }, +] + +[[package]] +name = "black" +version = "25.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "mypy-extensions" }, + { name = "packaging" }, + { name = "pathspec" }, + { name = "platformdirs" }, + { name = "pytokens" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c4/d9/07b458a3f1c525ac392b5edc6b191ff140b596f9d77092429417a54e249d/black-25.12.0.tar.gz", hash = "sha256:8d3dd9cea14bff7ddc0eb243c811cdb1a011ebb4800a5f0335a01a68654796a7", size = 659264, upload-time = "2025-12-08T01:40:52.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/bd/26083f805115db17fda9877b3c7321d08c647df39d0df4c4ca8f8450593e/black-25.12.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:31f96b7c98c1ddaeb07dc0f56c652e25bdedaac76d5b68a059d998b57c55594a", size = 1924178, upload-time = "2025-12-08T01:49:51.048Z" }, + { url = "https://files.pythonhosted.org/packages/89/6b/ea00d6651561e2bdd9231c4177f4f2ae19cc13a0b0574f47602a7519b6ca/black-25.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:05dd459a19e218078a1f98178c13f861fe6a9a5f88fc969ca4d9b49eb1809783", size = 1742643, upload-time = "2025-12-08T01:49:59.09Z" }, + { url = "https://files.pythonhosted.org/packages/6d/f3/360fa4182e36e9875fabcf3a9717db9d27a8d11870f21cff97725c54f35b/black-25.12.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c1f68c5eff61f226934be6b5b80296cf6939e5d2f0c2f7d543ea08b204bfaf59", size = 1800158, upload-time = "2025-12-08T01:44:27.301Z" }, + { url = "https://files.pythonhosted.org/packages/f8/08/2c64830cb6616278067e040acca21d4f79727b23077633953081c9445d61/black-25.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:274f940c147ddab4442d316b27f9e332ca586d39c85ecf59ebdea82cc9ee8892", size = 1426197, upload-time = "2025-12-08T01:45:51.198Z" }, + { url = "https://files.pythonhosted.org/packages/d4/60/a93f55fd9b9816b7432cf6842f0e3000fdd5b7869492a04b9011a133ee37/black-25.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:169506ba91ef21e2e0591563deda7f00030cb466e747c4b09cb0a9dae5db2f43", size = 1237266, upload-time = "2025-12-08T01:45:10.556Z" }, + { url = "https://files.pythonhosted.org/packages/c8/52/c551e36bc95495d2aa1a37d50566267aa47608c81a53f91daa809e03293f/black-25.12.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a05ddeb656534c3e27a05a29196c962877c83fa5503db89e68857d1161ad08a5", size = 1923809, upload-time = "2025-12-08T01:46:55.126Z" }, + { url = "https://files.pythonhosted.org/packages/a0/f7/aac9b014140ee56d247e707af8db0aae2e9efc28d4a8aba92d0abd7ae9d1/black-25.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9ec77439ef3e34896995503865a85732c94396edcc739f302c5673a2315e1e7f", size = 1742384, upload-time = "2025-12-08T01:49:37.022Z" }, + { url = "https://files.pythonhosted.org/packages/74/98/38aaa018b2ab06a863974c12b14a6266badc192b20603a81b738c47e902e/black-25.12.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e509c858adf63aa61d908061b52e580c40eae0dfa72415fa47ac01b12e29baf", size = 1798761, 
upload-time = "2025-12-08T01:46:05.386Z" }, + { url = "https://files.pythonhosted.org/packages/16/3a/a8ac542125f61574a3f015b521ca83b47321ed19bb63fe6d7560f348bfe1/black-25.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:252678f07f5bac4ff0d0e9b261fbb029fa530cfa206d0a636a34ab445ef8ca9d", size = 1429180, upload-time = "2025-12-08T01:45:34.903Z" }, + { url = "https://files.pythonhosted.org/packages/e6/2d/bdc466a3db9145e946762d52cd55b1385509d9f9004fec1c97bdc8debbfb/black-25.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:bc5b1c09fe3c931ddd20ee548511c64ebf964ada7e6f0763d443947fd1c603ce", size = 1239350, upload-time = "2025-12-08T01:46:09.458Z" }, + { url = "https://files.pythonhosted.org/packages/35/46/1d8f2542210c502e2ae1060b2e09e47af6a5e5963cb78e22ec1a11170b28/black-25.12.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:0a0953b134f9335c2434864a643c842c44fba562155c738a2a37a4d61f00cad5", size = 1917015, upload-time = "2025-12-08T01:53:27.987Z" }, + { url = "https://files.pythonhosted.org/packages/41/37/68accadf977672beb8e2c64e080f568c74159c1aaa6414b4cd2aef2d7906/black-25.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:2355bbb6c3b76062870942d8cc450d4f8ac71f9c93c40122762c8784df49543f", size = 1741830, upload-time = "2025-12-08T01:54:36.861Z" }, + { url = "https://files.pythonhosted.org/packages/ac/76/03608a9d8f0faad47a3af3a3c8c53af3367f6c0dd2d23a84710456c7ac56/black-25.12.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9678bd991cc793e81d19aeeae57966ee02909877cb65838ccffef24c3ebac08f", size = 1791450, upload-time = "2025-12-08T01:44:52.581Z" }, + { url = "https://files.pythonhosted.org/packages/06/99/b2a4bd7dfaea7964974f947e1c76d6886d65fe5d24f687df2d85406b2609/black-25.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:97596189949a8aad13ad12fcbb4ae89330039b96ad6742e6f6b45e75ad5cfd83", size = 1452042, upload-time = "2025-12-08T01:46:13.188Z" }, + { url = "https://files.pythonhosted.org/packages/b2/7c/d9825de75ae5dd7795d007681b752275ea85a1c5d83269b4b9c754c2aaab/black-25.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:778285d9ea197f34704e3791ea9404cd6d07595745907dd2ce3da7a13627b29b", size = 1267446, upload-time = "2025-12-08T01:46:14.497Z" }, + { url = "https://files.pythonhosted.org/packages/68/11/21331aed19145a952ad28fca2756a1433ee9308079bd03bd898e903a2e53/black-25.12.0-py3-none-any.whl", hash = "sha256:48ceb36c16dbc84062740049eef990bb2ce07598272e673c17d1a7720c71c828", size = 206191, upload-time = "2025-12-08T01:40:50.963Z" }, +] + +[[package]] +name = "bracex" +version = "2.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/63/9a/fec38644694abfaaeca2798b58e276a8e61de49e2e37494ace423395febc/bracex-2.6.tar.gz", hash = "sha256:98f1347cd77e22ee8d967a30ad4e310b233f7754dbf31ff3fceb76145ba47dc7", size = 26642, upload-time = "2025-06-22T19:12:31.254Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9d/2a/9186535ce58db529927f6cf5990a849aa9e052eea3e2cfefe20b9e1802da/bracex-2.6-py3-none-any.whl", hash = "sha256:0b0049264e7340b3ec782b5cb99beb325f36c3782a32e36e876452fd49a09952", size = 11508, upload-time = "2025-06-22T19:12:29.781Z" }, +] + +[[package]] +name = "build" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "os_name == 'nt'" }, + { name = "packaging" }, + { name = "pyproject-hooks" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/25/1c/23e33405a7c9eac261dff640926b8b5adaed6a6eb3e1767d441ed611d0c0/build-1.3.0.tar.gz", hash = "sha256:698edd0ea270bde950f53aed21f3a0135672206f3911e0176261a31e0e07b397", size = 48544, upload-time = "2025-08-01T21:27:09.268Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/8c/2b30c12155ad8de0cf641d76a8b396a16d2c36bc6d50b621a62b7c4567c1/build-1.3.0-py3-none-any.whl", hash = "sha256:7145f0b5061ba90a1500d60bd1b13ca0a8a4cebdd0cc16ed8adf1c0e739f43b4", size = 23382, upload-time = "2025-08-01T21:27:07.844Z" }, +] + +[[package]] +name = "cachetools" +version = "6.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/1d/ede8680603f6016887c062a2cf4fc8fdba905866a3ab8831aa8aa651320c/cachetools-6.2.4.tar.gz", hash = "sha256:82c5c05585e70b6ba2d3ae09ea60b79548872185d2f24ae1f2709d37299fd607", size = 31731, upload-time = "2025-12-15T18:24:53.744Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/fc/1d7b80d0eb7b714984ce40efc78859c022cd930e402f599d8ca9e39c78a4/cachetools-6.2.4-py3-none-any.whl", hash = "sha256:69a7a52634fed8b8bf6e24a050fb60bff1c9bd8f6d24572b99c32d4e71e62a51", size = 11551, upload-time = "2025-12-15T18:24:52.332Z" }, +] + +[[package]] +name = "certifi" +version = "2025.11.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, +] + +[[package]] +name = "cffi" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" }, + { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" }, + { url = "https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" }, + { url = "https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" }, + { url = "https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = "2025-09-08T23:22:51.364Z" }, + { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" }, + { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" }, + { url = "https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" }, + { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" }, + { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" }, + { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, + { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, + { url = 
"https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, + { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, + { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, + { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, + { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" }, + { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, + { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, + { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, + { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, + { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, + { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, + { url = 
"https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, + { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, + { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, + { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, + { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, + { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" }, + { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" }, + { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, + { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, + { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, + { url = 
"https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, + { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, + { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, + { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, + { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, + { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" }, + { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, + { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, + { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, + { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, +] + +[[package]] +name = "cfgv" +version = "3.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4e/b5/721b8799b04bf9afe054a3899c6cf4e880fcf8563cc71c15610242490a0c/cfgv-3.5.0.tar.gz", hash = "sha256:d5b1034354820651caa73ede66a6294d6e95c1b00acc5e9b098e917404669132", size = 7334, upload-time = "2025-11-19T20:55:51.612Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/db/3c/33bac158f8ab7f89b2e59426d5fe2e4f63f7ed25df84c036890172b412b5/cfgv-3.5.0-py2.py3-none-any.whl", hash = "sha256:a8dc6b26ad22ff227d2634a65cb388215ce6cc96bbcc5cfde7641ae87e8dacc0", size = 7445, upload-time = "2025-11-19T20:55:50.744Z" }, +] + +[[package]] +name = "chardet" +version = "5.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/f7b6ab21ec75897ed80c17d79b15951a719226b9fababf1e40ea74d69079/chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7", size = 2069618, upload-time = "2023-08-01T19:23:02.662Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/6f/f5fbc992a329ee4e0f288c1fe0e2ad9485ed064cac731ed2fe47dcc38cbf/chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970", size = 199385, upload-time = "2023-08-01T19:23:00.661Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" }, + { url = "https://files.pythonhosted.org/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" }, + { url = "https://files.pythonhosted.org/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" }, + { url = "https://files.pythonhosted.org/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" }, + { url = "https://files.pythonhosted.org/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" }, + { url = "https://files.pythonhosted.org/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, upload-time = 
"2025-10-14T04:40:59.468Z" }, + { url = "https://files.pythonhosted.org/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = "2025-10-14T04:41:00.623Z" }, + { url = "https://files.pythonhosted.org/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" }, + { url = "https://files.pythonhosted.org/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" }, + { url = "https://files.pythonhosted.org/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" }, + { url = "https://files.pythonhosted.org/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" }, + { url = "https://files.pythonhosted.org/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" }, + { url = "https://files.pythonhosted.org/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" }, + { url = "https://files.pythonhosted.org/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" }, + { url = "https://files.pythonhosted.org/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 107131, upload-time = "2025-10-14T04:41:10.467Z" }, + { url = "https://files.pythonhosted.org/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" }, + { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = 
"2025-10-14T04:41:13.346Z" }, + { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, + { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = "https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, + { url = 
"https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, + { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, + { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, + { url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, + { url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, + { url = 
"https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, + { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, + { url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, + { url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, + { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, + { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, + { url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, + { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = 
"2025-10-14T04:41:52.122Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "click" +version = "8.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "coverage" +version = "7.13.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b6/45/2c665ca77ec32ad67e25c77daf1cee28ee4558f3bc571cdbaf88a00b9f23/coverage-7.13.0.tar.gz", hash = "sha256:a394aa27f2d7ff9bc04cf703817773a59ad6dfbd577032e690f961d2460ee936", size = 820905, upload-time = "2025-12-08T13:14:38.055Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/f1/2619559f17f31ba00fc40908efd1fbf1d0a5536eb75dc8341e7d660a08de/coverage-7.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0b3d67d31383c4c68e19a88e28fc4c2e29517580f1b0ebec4a069d502ce1e0bf", size = 218274, upload-time = "2025-12-08T13:12:52.095Z" }, + { url = "https://files.pythonhosted.org/packages/2b/11/30d71ae5d6e949ff93b2a79a2c1b4822e00423116c5c6edfaeef37301396/coverage-7.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:581f086833d24a22c89ae0fe2142cfaa1c92c930adf637ddf122d55083fb5a0f", size = 218638, upload-time = "2025-12-08T13:12:53.418Z" }, + { url = "https://files.pythonhosted.org/packages/79/c2/fce80fc6ded8d77e53207489d6065d0fed75db8951457f9213776615e0f5/coverage-7.13.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0a3a30f0e257df382f5f9534d4ce3d4cf06eafaf5192beb1a7bd066cb10e78fb", size = 250129, upload-time = "2025-12-08T13:12:54.744Z" }, + { url = "https://files.pythonhosted.org/packages/5b/b6/51b5d1eb6fcbb9a1d5d6984e26cbe09018475c2922d554fd724dd0f056ee/coverage-7.13.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:583221913fbc8f53b88c42e8dbb8fca1d0f2e597cb190ce45916662b8b9d9621", size = 252885, upload-time = "2025-12-08T13:12:56.401Z" }, + { url = 
"https://files.pythonhosted.org/packages/0d/f8/972a5affea41de798691ab15d023d3530f9f56a72e12e243f35031846ff7/coverage-7.13.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f5d9bd30756fff3e7216491a0d6d520c448d5124d3d8e8f56446d6412499e74", size = 253974, upload-time = "2025-12-08T13:12:57.718Z" }, + { url = "https://files.pythonhosted.org/packages/8a/56/116513aee860b2c7968aa3506b0f59b22a959261d1dbf3aea7b4450a7520/coverage-7.13.0-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a23e5a1f8b982d56fa64f8e442e037f6ce29322f1f9e6c2344cd9e9f4407ee57", size = 250538, upload-time = "2025-12-08T13:12:59.254Z" }, + { url = "https://files.pythonhosted.org/packages/d6/75/074476d64248fbadf16dfafbf93fdcede389ec821f74ca858d7c87d2a98c/coverage-7.13.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9b01c22bc74a7fb44066aaf765224c0d933ddf1f5047d6cdfe4795504a4493f8", size = 251912, upload-time = "2025-12-08T13:13:00.604Z" }, + { url = "https://files.pythonhosted.org/packages/f2/d2/aa4f8acd1f7c06024705c12609d8698c51b27e4d635d717cd1934c9668e2/coverage-7.13.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:898cce66d0836973f48dda4e3514d863d70142bdf6dfab932b9b6a90ea5b222d", size = 250054, upload-time = "2025-12-08T13:13:01.892Z" }, + { url = "https://files.pythonhosted.org/packages/19/98/8df9e1af6a493b03694a1e8070e024e7d2cdc77adedc225a35e616d505de/coverage-7.13.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:3ab483ea0e251b5790c2aac03acde31bff0c736bf8a86829b89382b407cd1c3b", size = 249619, upload-time = "2025-12-08T13:13:03.236Z" }, + { url = "https://files.pythonhosted.org/packages/d8/71/f8679231f3353018ca66ef647fa6fe7b77e6bff7845be54ab84f86233363/coverage-7.13.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1d84e91521c5e4cb6602fe11ece3e1de03b2760e14ae4fcf1a4b56fa3c801fcd", size = 251496, upload-time = "2025-12-08T13:13:04.511Z" }, + { url = "https://files.pythonhosted.org/packages/04/86/9cb406388034eaf3c606c22094edbbb82eea1fa9d20c0e9efadff20d0733/coverage-7.13.0-cp312-cp312-win32.whl", hash = "sha256:193c3887285eec1dbdb3f2bd7fbc351d570ca9c02ca756c3afbc71b3c98af6ef", size = 220808, upload-time = "2025-12-08T13:13:06.422Z" }, + { url = "https://files.pythonhosted.org/packages/1c/59/af483673df6455795daf5f447c2f81a3d2fcfc893a22b8ace983791f6f34/coverage-7.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:4f3e223b2b2db5e0db0c2b97286aba0036ca000f06aca9b12112eaa9af3d92ae", size = 221616, upload-time = "2025-12-08T13:13:07.95Z" }, + { url = "https://files.pythonhosted.org/packages/64/b0/959d582572b30a6830398c60dd419c1965ca4b5fb38ac6b7093a0d50ca8d/coverage-7.13.0-cp312-cp312-win_arm64.whl", hash = "sha256:086cede306d96202e15a4b77ace8472e39d9f4e5f9fd92dd4fecdfb2313b2080", size = 220261, upload-time = "2025-12-08T13:13:09.581Z" }, + { url = "https://files.pythonhosted.org/packages/7c/cc/bce226595eb3bf7d13ccffe154c3c487a22222d87ff018525ab4dd2e9542/coverage-7.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:28ee1c96109974af104028a8ef57cec21447d42d0e937c0275329272e370ebcf", size = 218297, upload-time = "2025-12-08T13:13:10.977Z" }, + { url = "https://files.pythonhosted.org/packages/3b/9f/73c4d34600aae03447dff3d7ad1d0ac649856bfb87d1ca7d681cfc913f9e/coverage-7.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d1e97353dcc5587b85986cda4ff3ec98081d7e84dd95e8b2a6d59820f0545f8a", size = 218673, upload-time = "2025-12-08T13:13:12.562Z" }, + { url = 
"https://files.pythonhosted.org/packages/63/ab/8fa097db361a1e8586535ae5073559e6229596b3489ec3ef2f5b38df8cb2/coverage-7.13.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:99acd4dfdfeb58e1937629eb1ab6ab0899b131f183ee5f23e0b5da5cba2fec74", size = 249652, upload-time = "2025-12-08T13:13:13.909Z" }, + { url = "https://files.pythonhosted.org/packages/90/3a/9bfd4de2ff191feb37ef9465855ca56a6f2f30a3bca172e474130731ac3d/coverage-7.13.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:ff45e0cd8451e293b63ced93161e189780baf444119391b3e7d25315060368a6", size = 252251, upload-time = "2025-12-08T13:13:15.553Z" }, + { url = "https://files.pythonhosted.org/packages/df/61/b5d8105f016e1b5874af0d7c67542da780ccd4a5f2244a433d3e20ceb1ad/coverage-7.13.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f4f72a85316d8e13234cafe0a9f81b40418ad7a082792fa4165bd7d45d96066b", size = 253492, upload-time = "2025-12-08T13:13:16.849Z" }, + { url = "https://files.pythonhosted.org/packages/f3/b8/0fad449981803cc47a4694768b99823fb23632150743f9c83af329bb6090/coverage-7.13.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:11c21557d0e0a5a38632cbbaca5f008723b26a89d70db6315523df6df77d6232", size = 249850, upload-time = "2025-12-08T13:13:18.142Z" }, + { url = "https://files.pythonhosted.org/packages/9a/e9/8d68337c3125014d918cf4327d5257553a710a2995a6a6de2ac77e5aa429/coverage-7.13.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:76541dc8d53715fb4f7a3a06b34b0dc6846e3c69bc6204c55653a85dd6220971", size = 251633, upload-time = "2025-12-08T13:13:19.56Z" }, + { url = "https://files.pythonhosted.org/packages/55/14/d4112ab26b3a1bc4b3c1295d8452dcf399ed25be4cf649002fb3e64b2d93/coverage-7.13.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6e9e451dee940a86789134b6b0ffbe31c454ade3b849bb8a9d2cca2541a8e91d", size = 249586, upload-time = "2025-12-08T13:13:20.883Z" }, + { url = "https://files.pythonhosted.org/packages/2c/a9/22b0000186db663b0d82f86c2f1028099ae9ac202491685051e2a11a5218/coverage-7.13.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:5c67dace46f361125e6b9cace8fe0b729ed8479f47e70c89b838d319375c8137", size = 249412, upload-time = "2025-12-08T13:13:22.22Z" }, + { url = "https://files.pythonhosted.org/packages/a1/2e/42d8e0d9e7527fba439acdc6ed24a2b97613b1dc85849b1dd935c2cffef0/coverage-7.13.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f59883c643cb19630500f57016f76cfdcd6845ca8c5b5ea1f6e17f74c8e5f511", size = 251191, upload-time = "2025-12-08T13:13:23.899Z" }, + { url = "https://files.pythonhosted.org/packages/a4/af/8c7af92b1377fd8860536aadd58745119252aaaa71a5213e5a8e8007a9f5/coverage-7.13.0-cp313-cp313-win32.whl", hash = "sha256:58632b187be6f0be500f553be41e277712baa278147ecb7559983c6d9faf7ae1", size = 220829, upload-time = "2025-12-08T13:13:25.182Z" }, + { url = "https://files.pythonhosted.org/packages/58/f9/725e8bf16f343d33cbe076c75dc8370262e194ff10072c0608b8e5cf33a3/coverage-7.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:73419b89f812f498aca53f757dd834919b48ce4799f9d5cad33ca0ae442bdb1a", size = 221640, upload-time = "2025-12-08T13:13:26.836Z" }, + { url = "https://files.pythonhosted.org/packages/8a/ff/e98311000aa6933cc79274e2b6b94a2fe0fe3434fca778eba82003675496/coverage-7.13.0-cp313-cp313-win_arm64.whl", hash = "sha256:eb76670874fdd6091eedcc856128ee48c41a9bbbb9c3f1c7c3cf169290e3ffd6", size = 220269, upload-time = "2025-12-08T13:13:28.116Z" }, + { 
url = "https://files.pythonhosted.org/packages/cf/cf/bbaa2e1275b300343ea865f7d424cc0a2e2a1df6925a070b2b2d5d765330/coverage-7.13.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:6e63ccc6e0ad8986386461c3c4b737540f20426e7ec932f42e030320896c311a", size = 218990, upload-time = "2025-12-08T13:13:29.463Z" }, + { url = "https://files.pythonhosted.org/packages/21/1d/82f0b3323b3d149d7672e7744c116e9c170f4957e0c42572f0366dbb4477/coverage-7.13.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:494f5459ffa1bd45e18558cd98710c36c0b8fbfa82a5eabcbe671d80ecffbfe8", size = 219340, upload-time = "2025-12-08T13:13:31.524Z" }, + { url = "https://files.pythonhosted.org/packages/fb/e3/fe3fd4702a3832a255f4d43013eacb0ef5fc155a5960ea9269d8696db28b/coverage-7.13.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:06cac81bf10f74034e055e903f5f946e3e26fc51c09fc9f584e4a1605d977053", size = 260638, upload-time = "2025-12-08T13:13:32.965Z" }, + { url = "https://files.pythonhosted.org/packages/ad/01/63186cb000307f2b4da463f72af9b85d380236965574c78e7e27680a2593/coverage-7.13.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f2ffc92b46ed6e6760f1d47a71e56b5664781bc68986dbd1836b2b70c0ce2071", size = 262705, upload-time = "2025-12-08T13:13:34.378Z" }, + { url = "https://files.pythonhosted.org/packages/7c/a1/c0dacef0cc865f2455d59eed3548573ce47ed603205ffd0735d1d78b5906/coverage-7.13.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0602f701057c6823e5db1b74530ce85f17c3c5be5c85fc042ac939cbd909426e", size = 265125, upload-time = "2025-12-08T13:13:35.73Z" }, + { url = "https://files.pythonhosted.org/packages/ef/92/82b99223628b61300bd382c205795533bed021505eab6dd86e11fb5d7925/coverage-7.13.0-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:25dc33618d45456ccb1d37bce44bc78cf269909aa14c4db2e03d63146a8a1493", size = 259844, upload-time = "2025-12-08T13:13:37.69Z" }, + { url = "https://files.pythonhosted.org/packages/cf/2c/89b0291ae4e6cd59ef042708e1c438e2290f8c31959a20055d8768349ee2/coverage-7.13.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:71936a8b3b977ddd0b694c28c6a34f4fff2e9dd201969a4ff5d5fc7742d614b0", size = 262700, upload-time = "2025-12-08T13:13:39.525Z" }, + { url = "https://files.pythonhosted.org/packages/bf/f9/a5f992efae1996245e796bae34ceb942b05db275e4b34222a9a40b9fbd3b/coverage-7.13.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:936bc20503ce24770c71938d1369461f0c5320830800933bc3956e2a4ded930e", size = 260321, upload-time = "2025-12-08T13:13:41.172Z" }, + { url = "https://files.pythonhosted.org/packages/4c/89/a29f5d98c64fedbe32e2ac3c227fbf78edc01cc7572eee17d61024d89889/coverage-7.13.0-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:af0a583efaacc52ae2521f8d7910aff65cdb093091d76291ac5820d5e947fc1c", size = 259222, upload-time = "2025-12-08T13:13:43.282Z" }, + { url = "https://files.pythonhosted.org/packages/b3/c3/940fe447aae302a6701ee51e53af7e08b86ff6eed7631e5740c157ee22b9/coverage-7.13.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f1c23e24a7000da892a312fb17e33c5f94f8b001de44b7cf8ba2e36fbd15859e", size = 261411, upload-time = "2025-12-08T13:13:44.72Z" }, + { url = "https://files.pythonhosted.org/packages/eb/31/12a4aec689cb942a89129587860ed4d0fd522d5fda81237147fde554b8ae/coverage-7.13.0-cp313-cp313t-win32.whl", hash = "sha256:5f8a0297355e652001015e93be345ee54393e45dc3050af4a0475c5a2b767d46", size = 221505, upload-time = 
"2025-12-08T13:13:46.332Z" }, + { url = "https://files.pythonhosted.org/packages/65/8c/3b5fe3259d863572d2b0827642c50c3855d26b3aefe80bdc9eba1f0af3b0/coverage-7.13.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6abb3a4c52f05e08460bd9acf04fec027f8718ecaa0d09c40ffbc3fbd70ecc39", size = 222569, upload-time = "2025-12-08T13:13:47.79Z" }, + { url = "https://files.pythonhosted.org/packages/b0/39/f71fa8316a96ac72fc3908839df651e8eccee650001a17f2c78cdb355624/coverage-7.13.0-cp313-cp313t-win_arm64.whl", hash = "sha256:3ad968d1e3aa6ce5be295ab5fe3ae1bf5bb4769d0f98a80a0252d543a2ef2e9e", size = 220841, upload-time = "2025-12-08T13:13:49.243Z" }, + { url = "https://files.pythonhosted.org/packages/f8/4b/9b54bedda55421449811dcd5263a2798a63f48896c24dfb92b0f1b0845bd/coverage-7.13.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:453b7ec753cf5e4356e14fe858064e5520c460d3bbbcb9c35e55c0d21155c256", size = 218343, upload-time = "2025-12-08T13:13:50.811Z" }, + { url = "https://files.pythonhosted.org/packages/59/df/c3a1f34d4bba2e592c8979f924da4d3d4598b0df2392fbddb7761258e3dc/coverage-7.13.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:af827b7cbb303e1befa6c4f94fd2bf72f108089cfa0f8abab8f4ca553cf5ca5a", size = 218672, upload-time = "2025-12-08T13:13:52.284Z" }, + { url = "https://files.pythonhosted.org/packages/07/62/eec0659e47857698645ff4e6ad02e30186eb8afd65214fd43f02a76537cb/coverage-7.13.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9987a9e4f8197a1000280f7cc089e3ea2c8b3c0a64d750537809879a7b4ceaf9", size = 249715, upload-time = "2025-12-08T13:13:53.791Z" }, + { url = "https://files.pythonhosted.org/packages/23/2d/3c7ff8b2e0e634c1f58d095f071f52ed3c23ff25be524b0ccae8b71f99f8/coverage-7.13.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3188936845cd0cb114fa6a51842a304cdbac2958145d03be2377ec41eb285d19", size = 252225, upload-time = "2025-12-08T13:13:55.274Z" }, + { url = "https://files.pythonhosted.org/packages/aa/ac/fb03b469d20e9c9a81093575003f959cf91a4a517b783aab090e4538764b/coverage-7.13.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a2bdb3babb74079f021696cb46b8bb5f5661165c385d3a238712b031a12355be", size = 253559, upload-time = "2025-12-08T13:13:57.161Z" }, + { url = "https://files.pythonhosted.org/packages/29/62/14afa9e792383c66cc0a3b872a06ded6e4ed1079c7d35de274f11d27064e/coverage-7.13.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7464663eaca6adba4175f6c19354feea61ebbdd735563a03d1e472c7072d27bb", size = 249724, upload-time = "2025-12-08T13:13:58.692Z" }, + { url = "https://files.pythonhosted.org/packages/31/b7/333f3dab2939070613696ab3ee91738950f0467778c6e5a5052e840646b7/coverage-7.13.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8069e831f205d2ff1f3d355e82f511eb7c5522d7d413f5db5756b772ec8697f8", size = 251582, upload-time = "2025-12-08T13:14:00.642Z" }, + { url = "https://files.pythonhosted.org/packages/81/cb/69162bda9381f39b2287265d7e29ee770f7c27c19f470164350a38318764/coverage-7.13.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:6fb2d5d272341565f08e962cce14cdf843a08ac43bd621783527adb06b089c4b", size = 249538, upload-time = "2025-12-08T13:14:02.556Z" }, + { url = "https://files.pythonhosted.org/packages/e0/76/350387b56a30f4970abe32b90b2a434f87d29f8b7d4ae40d2e8a85aacfb3/coverage-7.13.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:5e70f92ef89bac1ac8a99b3324923b4749f008fdbd7aa9cb35e01d7a284a04f9", size = 
249349, upload-time = "2025-12-08T13:14:04.015Z" }, + { url = "https://files.pythonhosted.org/packages/86/0d/7f6c42b8d59f4c7e43ea3059f573c0dcfed98ba46eb43c68c69e52ae095c/coverage-7.13.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:4b5de7d4583e60d5fd246dd57fcd3a8aa23c6e118a8c72b38adf666ba8e7e927", size = 251011, upload-time = "2025-12-08T13:14:05.505Z" }, + { url = "https://files.pythonhosted.org/packages/d7/f1/4bb2dff379721bb0b5c649d5c5eaf438462cad824acf32eb1b7ca0c7078e/coverage-7.13.0-cp314-cp314-win32.whl", hash = "sha256:a6c6e16b663be828a8f0b6c5027d36471d4a9f90d28444aa4ced4d48d7d6ae8f", size = 221091, upload-time = "2025-12-08T13:14:07.127Z" }, + { url = "https://files.pythonhosted.org/packages/ba/44/c239da52f373ce379c194b0ee3bcc121020e397242b85f99e0afc8615066/coverage-7.13.0-cp314-cp314-win_amd64.whl", hash = "sha256:0900872f2fdb3ee5646b557918d02279dc3af3dfb39029ac4e945458b13f73bc", size = 221904, upload-time = "2025-12-08T13:14:08.542Z" }, + { url = "https://files.pythonhosted.org/packages/89/1f/b9f04016d2a29c2e4a0307baefefad1a4ec5724946a2b3e482690486cade/coverage-7.13.0-cp314-cp314-win_arm64.whl", hash = "sha256:3a10260e6a152e5f03f26db4a407c4c62d3830b9af9b7c0450b183615f05d43b", size = 220480, upload-time = "2025-12-08T13:14:10.958Z" }, + { url = "https://files.pythonhosted.org/packages/16/d4/364a1439766c8e8647860584171c36010ca3226e6e45b1753b1b249c5161/coverage-7.13.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:9097818b6cc1cfb5f174e3263eba4a62a17683bcfe5c4b5d07f4c97fa51fbf28", size = 219074, upload-time = "2025-12-08T13:14:13.345Z" }, + { url = "https://files.pythonhosted.org/packages/ce/f4/71ba8be63351e099911051b2089662c03d5671437a0ec2171823c8e03bec/coverage-7.13.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0018f73dfb4301a89292c73be6ba5f58722ff79f51593352759c1790ded1cabe", size = 219342, upload-time = "2025-12-08T13:14:15.02Z" }, + { url = "https://files.pythonhosted.org/packages/5e/25/127d8ed03d7711a387d96f132589057213e3aef7475afdaa303412463f22/coverage-7.13.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:166ad2a22ee770f5656e1257703139d3533b4a0b6909af67c6b4a3adc1c98657", size = 260713, upload-time = "2025-12-08T13:14:16.907Z" }, + { url = "https://files.pythonhosted.org/packages/fd/db/559fbb6def07d25b2243663b46ba9eb5a3c6586c0c6f4e62980a68f0ee1c/coverage-7.13.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f6aaef16d65d1787280943f1c8718dc32e9cf141014e4634d64446702d26e0ff", size = 262825, upload-time = "2025-12-08T13:14:18.68Z" }, + { url = "https://files.pythonhosted.org/packages/37/99/6ee5bf7eff884766edb43bd8736b5e1c5144d0fe47498c3779326fe75a35/coverage-7.13.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e999e2dcc094002d6e2c7bbc1fb85b58ba4f465a760a8014d97619330cdbbbf3", size = 265233, upload-time = "2025-12-08T13:14:20.55Z" }, + { url = "https://files.pythonhosted.org/packages/d8/90/92f18fe0356ea69e1f98f688ed80cec39f44e9f09a1f26a1bbf017cc67f2/coverage-7.13.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:00c3d22cf6fb1cf3bf662aaaa4e563be8243a5ed2630339069799835a9cc7f9b", size = 259779, upload-time = "2025-12-08T13:14:22.367Z" }, + { url = "https://files.pythonhosted.org/packages/90/5d/b312a8b45b37a42ea7d27d7d3ff98ade3a6c892dd48d1d503e773503373f/coverage-7.13.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = 
"sha256:22ccfe8d9bb0d6134892cbe1262493a8c70d736b9df930f3f3afae0fe3ac924d", size = 262700, upload-time = "2025-12-08T13:14:24.309Z" }, + { url = "https://files.pythonhosted.org/packages/63/f8/b1d0de5c39351eb71c366f872376d09386640840a2e09b0d03973d791e20/coverage-7.13.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:9372dff5ea15930fea0445eaf37bbbafbc771a49e70c0aeed8b4e2c2614cc00e", size = 260302, upload-time = "2025-12-08T13:14:26.068Z" }, + { url = "https://files.pythonhosted.org/packages/aa/7c/d42f4435bc40c55558b3109a39e2d456cddcec37434f62a1f1230991667a/coverage-7.13.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:69ac2c492918c2461bc6ace42d0479638e60719f2a4ef3f0815fa2df88e9f940", size = 259136, upload-time = "2025-12-08T13:14:27.604Z" }, + { url = "https://files.pythonhosted.org/packages/b8/d3/23413241dc04d47cfe19b9a65b32a2edd67ecd0b817400c2843ebc58c847/coverage-7.13.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:739c6c051a7540608d097b8e13c76cfa85263ced467168dc6b477bae3df7d0e2", size = 261467, upload-time = "2025-12-08T13:14:29.09Z" }, + { url = "https://files.pythonhosted.org/packages/13/e6/6e063174500eee216b96272c0d1847bf215926786f85c2bd024cf4d02d2f/coverage-7.13.0-cp314-cp314t-win32.whl", hash = "sha256:fe81055d8c6c9de76d60c94ddea73c290b416e061d40d542b24a5871bad498b7", size = 221875, upload-time = "2025-12-08T13:14:31.106Z" }, + { url = "https://files.pythonhosted.org/packages/3b/46/f4fb293e4cbe3620e3ac2a3e8fd566ed33affb5861a9b20e3dd6c1896cbc/coverage-7.13.0-cp314-cp314t-win_amd64.whl", hash = "sha256:445badb539005283825959ac9fa4a28f712c214b65af3a2c464f1adc90f5fcbc", size = 222982, upload-time = "2025-12-08T13:14:33.1Z" }, + { url = "https://files.pythonhosted.org/packages/68/62/5b3b9018215ed9733fbd1ae3b2ed75c5de62c3b55377a52cae732e1b7805/coverage-7.13.0-cp314-cp314t-win_arm64.whl", hash = "sha256:de7f6748b890708578fc4b7bb967d810aeb6fcc9bff4bb77dbca77dab2f9df6a", size = 221016, upload-time = "2025-12-08T13:14:34.601Z" }, + { url = "https://files.pythonhosted.org/packages/8d/4c/1968f32fb9a2604645827e11ff84a31e59d532e01995f904723b4f5328b3/coverage-7.13.0-py3-none-any.whl", hash = "sha256:850d2998f380b1e266459ca5b47bc9e7daf9af1d070f66317972f382d46f1904", size = 210068, upload-time = "2025-12-08T13:14:36.236Z" }, +] + +[[package]] +name = "datamodel-code-generator" +version = "0.48.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "argcomplete" }, + { name = "black" }, + { name = "genson" }, + { name = "inflect" }, + { name = "isort" }, + { name = "jinja2" }, + { name = "packaging" }, + { name = "pydantic" }, + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/59/9e/d975995c21ecfc645c1724f836a6d53411abaa1e727872b513541edbe416/datamodel_code_generator-0.48.0.tar.gz", hash = "sha256:638a51d44c77af95f0789639c3144f9bd74429cfc0690ce12acb6038c04af6f5", size = 608754, upload-time = "2025-12-24T01:45:19.69Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/e9/de87c085142bd74778940dc96b086ea6f58fa7ac9543f30642372dd2431e/datamodel_code_generator-0.48.0-py3-none-any.whl", hash = "sha256:b77c90672cc8bc7d239a875c0aefb4de7dc1e48188c0622ea1c1456ca0dd1823", size = 210004, upload-time = "2025-12-24T01:45:17.506Z" }, +] + +[[package]] +name = "distlib" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/8e/709914eb2b5749865801041647dc7f4e6d00b549cfe88b65ca192995f07c/distlib-0.4.0.tar.gz", hash = 
"sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d", size = 614605, upload-time = "2025-07-17T16:52:00.465Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/6b/e0547afaf41bf2c42e52430072fa5658766e3d65bd4b03a563d1b6336f57/distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16", size = 469047, upload-time = "2025-07-17T16:51:58.613Z" }, +] + +[[package]] +name = "editorconfig" +version = "0.17.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/88/3a/a61d9a1f319a186b05d14df17daea42fcddea63c213bcd61a929fb3a6796/editorconfig-0.17.1.tar.gz", hash = "sha256:23c08b00e8e08cc3adcddb825251c497478df1dada6aefeb01e626ad37303745", size = 14695, upload-time = "2025-06-09T08:21:37.097Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/96/fd/a40c621ff207f3ce8e484aa0fc8ba4eb6e3ecf52e15b42ba764b457a9550/editorconfig-0.17.1-py3-none-any.whl", hash = "sha256:1eda9c2c0db8c16dbd50111b710572a5e6de934e39772de1959d41f64fc17c82", size = 16360, upload-time = "2025-06-09T08:21:35.654Z" }, +] + +[[package]] +name = "execnet" +version = "2.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bf/89/780e11f9588d9e7128a3f87788354c7946a9cbb1401ad38a48c4db9a4f07/execnet-2.1.2.tar.gz", hash = "sha256:63d83bfdd9a23e35b9c6a3261412324f964c2ec8dcd8d3c6916ee9373e0befcd", size = 166622, upload-time = "2025-11-12T09:56:37.75Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ab/84/02fc1827e8cdded4aa65baef11296a9bbe595c474f0d6d758af082d849fd/execnet-2.1.2-py3-none-any.whl", hash = "sha256:67fba928dd5a544b783f6056f449e5e3931a5c378b128bc18501f7ea79e296ec", size = 40708, upload-time = "2025-11-12T09:56:36.333Z" }, +] + +[[package]] +name = "executing" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cc/28/c14e053b6762b1044f34a13aab6859bbf40456d37d23aa286ac24cfd9a5d/executing-2.2.1.tar.gz", hash = "sha256:3632cc370565f6648cc328b32435bd120a1e4ebb20c77e3fdde9a13cd1e533c4", size = 1129488, upload-time = "2025-09-01T09:48:10.866Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/ea/53f2148663b321f21b5a606bd5f191517cf40b7072c0497d3c92c4a13b1e/executing-2.2.1-py2.py3-none-any.whl", hash = "sha256:760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017", size = 28317, upload-time = "2025-09-01T09:48:08.5Z" }, +] + +[[package]] +name = "filelock" +version = "3.20.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a7/23/ce7a1126827cedeb958fc043d61745754464eb56c5937c35bbf2b8e26f34/filelock-3.20.1.tar.gz", hash = "sha256:b8360948b351b80f420878d8516519a2204b07aefcdcfd24912a5d33127f188c", size = 19476, upload-time = "2025-12-15T23:54:28.027Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/7f/a1a97644e39e7316d850784c642093c99df1290a460df4ede27659056834/filelock-3.20.1-py3-none-any.whl", hash = "sha256:15d9e9a67306188a44baa72f569d2bfd803076269365fdea0934385da4dc361a", size = 16666, upload-time = "2025-12-15T23:54:26.874Z" }, +] + +[[package]] +name = "frozenlist" +version = "1.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2d/f5/c831fac6cc817d26fd54c7eaccd04ef7e0288806943f7cc5bbf69f3ac1f0/frozenlist-1.8.0.tar.gz", hash = 
"sha256:3ede829ed8d842f6cd48fc7081d7a41001a56f1f38603f9d49bf3020d59a31ad", size = 45875, upload-time = "2025-10-06T05:38:17.865Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/29/948b9aa87e75820a38650af445d2ef2b6b8a6fab1a23b6bb9e4ef0be2d59/frozenlist-1.8.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:78f7b9e5d6f2fdb88cdde9440dc147259b62b9d3b019924def9f6478be254ac1", size = 87782, upload-time = "2025-10-06T05:36:06.649Z" }, + { url = "https://files.pythonhosted.org/packages/64/80/4f6e318ee2a7c0750ed724fa33a4bdf1eacdc5a39a7a24e818a773cd91af/frozenlist-1.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:229bf37d2e4acdaf808fd3f06e854a4a7a3661e871b10dc1f8f1896a3b05f18b", size = 50594, upload-time = "2025-10-06T05:36:07.69Z" }, + { url = "https://files.pythonhosted.org/packages/2b/94/5c8a2b50a496b11dd519f4a24cb5496cf125681dd99e94c604ccdea9419a/frozenlist-1.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f833670942247a14eafbb675458b4e61c82e002a148f49e68257b79296e865c4", size = 50448, upload-time = "2025-10-06T05:36:08.78Z" }, + { url = "https://files.pythonhosted.org/packages/6a/bd/d91c5e39f490a49df14320f4e8c80161cfcce09f1e2cde1edd16a551abb3/frozenlist-1.8.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:494a5952b1c597ba44e0e78113a7266e656b9794eec897b19ead706bd7074383", size = 242411, upload-time = "2025-10-06T05:36:09.801Z" }, + { url = "https://files.pythonhosted.org/packages/8f/83/f61505a05109ef3293dfb1ff594d13d64a2324ac3482be2cedc2be818256/frozenlist-1.8.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:96f423a119f4777a4a056b66ce11527366a8bb92f54e541ade21f2374433f6d4", size = 243014, upload-time = "2025-10-06T05:36:11.394Z" }, + { url = "https://files.pythonhosted.org/packages/d8/cb/cb6c7b0f7d4023ddda30cf56b8b17494eb3a79e3fda666bf735f63118b35/frozenlist-1.8.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3462dd9475af2025c31cc61be6652dfa25cbfb56cbbf52f4ccfe029f38decaf8", size = 234909, upload-time = "2025-10-06T05:36:12.598Z" }, + { url = "https://files.pythonhosted.org/packages/31/c5/cd7a1f3b8b34af009fb17d4123c5a778b44ae2804e3ad6b86204255f9ec5/frozenlist-1.8.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c4c800524c9cd9bac5166cd6f55285957fcfc907db323e193f2afcd4d9abd69b", size = 250049, upload-time = "2025-10-06T05:36:14.065Z" }, + { url = "https://files.pythonhosted.org/packages/c0/01/2f95d3b416c584a1e7f0e1d6d31998c4a795f7544069ee2e0962a4b60740/frozenlist-1.8.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d6a5df73acd3399d893dafc71663ad22534b5aa4f94e8a2fabfe856c3c1b6a52", size = 256485, upload-time = "2025-10-06T05:36:15.39Z" }, + { url = "https://files.pythonhosted.org/packages/ce/03/024bf7720b3abaebcff6d0793d73c154237b85bdf67b7ed55e5e9596dc9a/frozenlist-1.8.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:405e8fe955c2280ce66428b3ca55e12b3c4e9c336fb2103a4937e891c69a4a29", size = 237619, upload-time = "2025-10-06T05:36:16.558Z" }, + { url = "https://files.pythonhosted.org/packages/69/fa/f8abdfe7d76b731f5d8bd217827cf6764d4f1d9763407e42717b4bed50a0/frozenlist-1.8.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:908bd3f6439f2fef9e85031b59fd4f1297af54415fb60e4254a95f75b3cab3f3", size = 250320, upload-time = "2025-10-06T05:36:17.821Z" }, + { url = 
"https://files.pythonhosted.org/packages/f5/3c/b051329f718b463b22613e269ad72138cc256c540f78a6de89452803a47d/frozenlist-1.8.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:294e487f9ec720bd8ffcebc99d575f7eff3568a08a253d1ee1a0378754b74143", size = 246820, upload-time = "2025-10-06T05:36:19.046Z" }, + { url = "https://files.pythonhosted.org/packages/0f/ae/58282e8f98e444b3f4dd42448ff36fa38bef29e40d40f330b22e7108f565/frozenlist-1.8.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:74c51543498289c0c43656701be6b077f4b265868fa7f8a8859c197006efb608", size = 250518, upload-time = "2025-10-06T05:36:20.763Z" }, + { url = "https://files.pythonhosted.org/packages/8f/96/007e5944694d66123183845a106547a15944fbbb7154788cbf7272789536/frozenlist-1.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:776f352e8329135506a1d6bf16ac3f87bc25b28e765949282dcc627af36123aa", size = 239096, upload-time = "2025-10-06T05:36:22.129Z" }, + { url = "https://files.pythonhosted.org/packages/66/bb/852b9d6db2fa40be96f29c0d1205c306288f0684df8fd26ca1951d461a56/frozenlist-1.8.0-cp312-cp312-win32.whl", hash = "sha256:433403ae80709741ce34038da08511d4a77062aa924baf411ef73d1146e74faf", size = 39985, upload-time = "2025-10-06T05:36:23.661Z" }, + { url = "https://files.pythonhosted.org/packages/b8/af/38e51a553dd66eb064cdf193841f16f077585d4d28394c2fa6235cb41765/frozenlist-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:34187385b08f866104f0c0617404c8eb08165ab1272e884abc89c112e9c00746", size = 44591, upload-time = "2025-10-06T05:36:24.958Z" }, + { url = "https://files.pythonhosted.org/packages/a7/06/1dc65480ab147339fecc70797e9c2f69d9cea9cf38934ce08df070fdb9cb/frozenlist-1.8.0-cp312-cp312-win_arm64.whl", hash = "sha256:fe3c58d2f5db5fbd18c2987cba06d51b0529f52bc3a6cdc33d3f4eab725104bd", size = 40102, upload-time = "2025-10-06T05:36:26.333Z" }, + { url = "https://files.pythonhosted.org/packages/2d/40/0832c31a37d60f60ed79e9dfb5a92e1e2af4f40a16a29abcc7992af9edff/frozenlist-1.8.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8d92f1a84bb12d9e56f818b3a746f3efba93c1b63c8387a73dde655e1e42282a", size = 85717, upload-time = "2025-10-06T05:36:27.341Z" }, + { url = "https://files.pythonhosted.org/packages/30/ba/b0b3de23f40bc55a7057bd38434e25c34fa48e17f20ee273bbde5e0650f3/frozenlist-1.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96153e77a591c8adc2ee805756c61f59fef4cf4073a9275ee86fe8cba41241f7", size = 49651, upload-time = "2025-10-06T05:36:28.855Z" }, + { url = "https://files.pythonhosted.org/packages/0c/ab/6e5080ee374f875296c4243c381bbdef97a9ac39c6e3ce1d5f7d42cb78d6/frozenlist-1.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f21f00a91358803399890ab167098c131ec2ddd5f8f5fd5fe9c9f2c6fcd91e40", size = 49417, upload-time = "2025-10-06T05:36:29.877Z" }, + { url = "https://files.pythonhosted.org/packages/d5/4e/e4691508f9477ce67da2015d8c00acd751e6287739123113a9fca6f1604e/frozenlist-1.8.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fb30f9626572a76dfe4293c7194a09fb1fe93ba94c7d4f720dfae3b646b45027", size = 234391, upload-time = "2025-10-06T05:36:31.301Z" }, + { url = "https://files.pythonhosted.org/packages/40/76/c202df58e3acdf12969a7895fd6f3bc016c642e6726aa63bd3025e0fc71c/frozenlist-1.8.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eaa352d7047a31d87dafcacbabe89df0aa506abb5b1b85a2fb91bc3faa02d822", size = 233048, upload-time = "2025-10-06T05:36:32.531Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/c0/8746afb90f17b73ca5979c7a3958116e105ff796e718575175319b5bb4ce/frozenlist-1.8.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:03ae967b4e297f58f8c774c7eabcce57fe3c2434817d4385c50661845a058121", size = 226549, upload-time = "2025-10-06T05:36:33.706Z" }, + { url = "https://files.pythonhosted.org/packages/7e/eb/4c7eefc718ff72f9b6c4893291abaae5fbc0c82226a32dcd8ef4f7a5dbef/frozenlist-1.8.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f6292f1de555ffcc675941d65fffffb0a5bcd992905015f85d0592201793e0e5", size = 239833, upload-time = "2025-10-06T05:36:34.947Z" }, + { url = "https://files.pythonhosted.org/packages/c2/4e/e5c02187cf704224f8b21bee886f3d713ca379535f16893233b9d672ea71/frozenlist-1.8.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:29548f9b5b5e3460ce7378144c3010363d8035cea44bc0bf02d57f5a685e084e", size = 245363, upload-time = "2025-10-06T05:36:36.534Z" }, + { url = "https://files.pythonhosted.org/packages/1f/96/cb85ec608464472e82ad37a17f844889c36100eed57bea094518bf270692/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ec3cc8c5d4084591b4237c0a272cc4f50a5b03396a47d9caaf76f5d7b38a4f11", size = 229314, upload-time = "2025-10-06T05:36:38.582Z" }, + { url = "https://files.pythonhosted.org/packages/5d/6f/4ae69c550e4cee66b57887daeebe006fe985917c01d0fff9caab9883f6d0/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:517279f58009d0b1f2e7c1b130b377a349405da3f7621ed6bfae50b10adf20c1", size = 243365, upload-time = "2025-10-06T05:36:40.152Z" }, + { url = "https://files.pythonhosted.org/packages/7a/58/afd56de246cf11780a40a2c28dc7cbabbf06337cc8ddb1c780a2d97e88d8/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:db1e72ede2d0d7ccb213f218df6a078a9c09a7de257c2fe8fcef16d5925230b1", size = 237763, upload-time = "2025-10-06T05:36:41.355Z" }, + { url = "https://files.pythonhosted.org/packages/cb/36/cdfaf6ed42e2644740d4a10452d8e97fa1c062e2a8006e4b09f1b5fd7d63/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b4dec9482a65c54a5044486847b8a66bf10c9cb4926d42927ec4e8fd5db7fed8", size = 240110, upload-time = "2025-10-06T05:36:42.716Z" }, + { url = "https://files.pythonhosted.org/packages/03/a8/9ea226fbefad669f11b52e864c55f0bd57d3c8d7eb07e9f2e9a0b39502e1/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:21900c48ae04d13d416f0e1e0c4d81f7931f73a9dfa0b7a8746fb2fe7dd970ed", size = 233717, upload-time = "2025-10-06T05:36:44.251Z" }, + { url = "https://files.pythonhosted.org/packages/1e/0b/1b5531611e83ba7d13ccc9988967ea1b51186af64c42b7a7af465dcc9568/frozenlist-1.8.0-cp313-cp313-win32.whl", hash = "sha256:8b7b94a067d1c504ee0b16def57ad5738701e4ba10cec90529f13fa03c833496", size = 39628, upload-time = "2025-10-06T05:36:45.423Z" }, + { url = "https://files.pythonhosted.org/packages/d8/cf/174c91dbc9cc49bc7b7aab74d8b734e974d1faa8f191c74af9b7e80848e6/frozenlist-1.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:878be833caa6a3821caf85eb39c5ba92d28e85df26d57afb06b35b2efd937231", size = 43882, upload-time = "2025-10-06T05:36:46.796Z" }, + { url = "https://files.pythonhosted.org/packages/c1/17/502cd212cbfa96eb1388614fe39a3fc9ab87dbbe042b66f97acb57474834/frozenlist-1.8.0-cp313-cp313-win_arm64.whl", hash = "sha256:44389d135b3ff43ba8cc89ff7f51f5a0bb6b63d829c8300f79a2fe4fe61bcc62", size = 39676, upload-time = "2025-10-06T05:36:47.8Z" }, + { url 
= "https://files.pythonhosted.org/packages/d2/5c/3bbfaa920dfab09e76946a5d2833a7cbdf7b9b4a91c714666ac4855b88b4/frozenlist-1.8.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:e25ac20a2ef37e91c1b39938b591457666a0fa835c7783c3a8f33ea42870db94", size = 89235, upload-time = "2025-10-06T05:36:48.78Z" }, + { url = "https://files.pythonhosted.org/packages/d2/d6/f03961ef72166cec1687e84e8925838442b615bd0b8854b54923ce5b7b8a/frozenlist-1.8.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07cdca25a91a4386d2e76ad992916a85038a9b97561bf7a3fd12d5d9ce31870c", size = 50742, upload-time = "2025-10-06T05:36:49.837Z" }, + { url = "https://files.pythonhosted.org/packages/1e/bb/a6d12b7ba4c3337667d0e421f7181c82dda448ce4e7ad7ecd249a16fa806/frozenlist-1.8.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4e0c11f2cc6717e0a741f84a527c52616140741cd812a50422f83dc31749fb52", size = 51725, upload-time = "2025-10-06T05:36:50.851Z" }, + { url = "https://files.pythonhosted.org/packages/bc/71/d1fed0ffe2c2ccd70b43714c6cab0f4188f09f8a67a7914a6b46ee30f274/frozenlist-1.8.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b3210649ee28062ea6099cfda39e147fa1bc039583c8ee4481cb7811e2448c51", size = 284533, upload-time = "2025-10-06T05:36:51.898Z" }, + { url = "https://files.pythonhosted.org/packages/c9/1f/fb1685a7b009d89f9bf78a42d94461bc06581f6e718c39344754a5d9bada/frozenlist-1.8.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:581ef5194c48035a7de2aefc72ac6539823bb71508189e5de01d60c9dcd5fa65", size = 292506, upload-time = "2025-10-06T05:36:53.101Z" }, + { url = "https://files.pythonhosted.org/packages/e6/3b/b991fe1612703f7e0d05c0cf734c1b77aaf7c7d321df4572e8d36e7048c8/frozenlist-1.8.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3ef2d026f16a2b1866e1d86fc4e1291e1ed8a387b2c333809419a2f8b3a77b82", size = 274161, upload-time = "2025-10-06T05:36:54.309Z" }, + { url = "https://files.pythonhosted.org/packages/ca/ec/c5c618767bcdf66e88945ec0157d7f6c4a1322f1473392319b7a2501ded7/frozenlist-1.8.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5500ef82073f599ac84d888e3a8c1f77ac831183244bfd7f11eaa0289fb30714", size = 294676, upload-time = "2025-10-06T05:36:55.566Z" }, + { url = "https://files.pythonhosted.org/packages/7c/ce/3934758637d8f8a88d11f0585d6495ef54b2044ed6ec84492a91fa3b27aa/frozenlist-1.8.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:50066c3997d0091c411a66e710f4e11752251e6d2d73d70d8d5d4c76442a199d", size = 300638, upload-time = "2025-10-06T05:36:56.758Z" }, + { url = "https://files.pythonhosted.org/packages/fc/4f/a7e4d0d467298f42de4b41cbc7ddaf19d3cfeabaf9ff97c20c6c7ee409f9/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:5c1c8e78426e59b3f8005e9b19f6ff46e5845895adbde20ece9218319eca6506", size = 283067, upload-time = "2025-10-06T05:36:57.965Z" }, + { url = "https://files.pythonhosted.org/packages/dc/48/c7b163063d55a83772b268e6d1affb960771b0e203b632cfe09522d67ea5/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:eefdba20de0d938cec6a89bd4d70f346a03108a19b9df4248d3cf0d88f1b0f51", size = 292101, upload-time = "2025-10-06T05:36:59.237Z" }, + { url = "https://files.pythonhosted.org/packages/9f/d0/2366d3c4ecdc2fd391e0afa6e11500bfba0ea772764d631bbf82f0136c9d/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = 
"sha256:cf253e0e1c3ceb4aaff6df637ce033ff6535fb8c70a764a8f46aafd3d6ab798e", size = 289901, upload-time = "2025-10-06T05:37:00.811Z" }, + { url = "https://files.pythonhosted.org/packages/b8/94/daff920e82c1b70e3618a2ac39fbc01ae3e2ff6124e80739ce5d71c9b920/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:032efa2674356903cd0261c4317a561a6850f3ac864a63fc1583147fb05a79b0", size = 289395, upload-time = "2025-10-06T05:37:02.115Z" }, + { url = "https://files.pythonhosted.org/packages/e3/20/bba307ab4235a09fdcd3cc5508dbabd17c4634a1af4b96e0f69bfe551ebd/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6da155091429aeba16851ecb10a9104a108bcd32f6c1642867eadaee401c1c41", size = 283659, upload-time = "2025-10-06T05:37:03.711Z" }, + { url = "https://files.pythonhosted.org/packages/fd/00/04ca1c3a7a124b6de4f8a9a17cc2fcad138b4608e7a3fc5877804b8715d7/frozenlist-1.8.0-cp313-cp313t-win32.whl", hash = "sha256:0f96534f8bfebc1a394209427d0f8a63d343c9779cda6fc25e8e121b5fd8555b", size = 43492, upload-time = "2025-10-06T05:37:04.915Z" }, + { url = "https://files.pythonhosted.org/packages/59/5e/c69f733a86a94ab10f68e496dc6b7e8bc078ebb415281d5698313e3af3a1/frozenlist-1.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5d63a068f978fc69421fb0e6eb91a9603187527c86b7cd3f534a5b77a592b888", size = 48034, upload-time = "2025-10-06T05:37:06.343Z" }, + { url = "https://files.pythonhosted.org/packages/16/6c/be9d79775d8abe79b05fa6d23da99ad6e7763a1d080fbae7290b286093fd/frozenlist-1.8.0-cp313-cp313t-win_arm64.whl", hash = "sha256:bf0a7e10b077bf5fb9380ad3ae8ce20ef919a6ad93b4552896419ac7e1d8e042", size = 41749, upload-time = "2025-10-06T05:37:07.431Z" }, + { url = "https://files.pythonhosted.org/packages/f1/c8/85da824b7e7b9b6e7f7705b2ecaf9591ba6f79c1177f324c2735e41d36a2/frozenlist-1.8.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:cee686f1f4cadeb2136007ddedd0aaf928ab95216e7691c63e50a8ec066336d0", size = 86127, upload-time = "2025-10-06T05:37:08.438Z" }, + { url = "https://files.pythonhosted.org/packages/8e/e8/a1185e236ec66c20afd72399522f142c3724c785789255202d27ae992818/frozenlist-1.8.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:119fb2a1bd47307e899c2fac7f28e85b9a543864df47aa7ec9d3c1b4545f096f", size = 49698, upload-time = "2025-10-06T05:37:09.48Z" }, + { url = "https://files.pythonhosted.org/packages/a1/93/72b1736d68f03fda5fdf0f2180fb6caaae3894f1b854d006ac61ecc727ee/frozenlist-1.8.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4970ece02dbc8c3a92fcc5228e36a3e933a01a999f7094ff7c23fbd2beeaa67c", size = 49749, upload-time = "2025-10-06T05:37:10.569Z" }, + { url = "https://files.pythonhosted.org/packages/a7/b2/fabede9fafd976b991e9f1b9c8c873ed86f202889b864756f240ce6dd855/frozenlist-1.8.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:cba69cb73723c3f329622e34bdbf5ce1f80c21c290ff04256cff1cd3c2036ed2", size = 231298, upload-time = "2025-10-06T05:37:11.993Z" }, + { url = "https://files.pythonhosted.org/packages/3a/3b/d9b1e0b0eed36e70477ffb8360c49c85c8ca8ef9700a4e6711f39a6e8b45/frozenlist-1.8.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:778a11b15673f6f1df23d9586f83c4846c471a8af693a22e066508b77d201ec8", size = 232015, upload-time = "2025-10-06T05:37:13.194Z" }, + { url = "https://files.pythonhosted.org/packages/dc/94/be719d2766c1138148564a3960fc2c06eb688da592bdc25adcf856101be7/frozenlist-1.8.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = 
"sha256:0325024fe97f94c41c08872db482cf8ac4800d80e79222c6b0b7b162d5b13686", size = 225038, upload-time = "2025-10-06T05:37:14.577Z" }, + { url = "https://files.pythonhosted.org/packages/e4/09/6712b6c5465f083f52f50cf74167b92d4ea2f50e46a9eea0523d658454ae/frozenlist-1.8.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:97260ff46b207a82a7567b581ab4190bd4dfa09f4db8a8b49d1a958f6aa4940e", size = 240130, upload-time = "2025-10-06T05:37:15.781Z" }, + { url = "https://files.pythonhosted.org/packages/f8/d4/cd065cdcf21550b54f3ce6a22e143ac9e4836ca42a0de1022da8498eac89/frozenlist-1.8.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:54b2077180eb7f83dd52c40b2750d0a9f175e06a42e3213ce047219de902717a", size = 242845, upload-time = "2025-10-06T05:37:17.037Z" }, + { url = "https://files.pythonhosted.org/packages/62/c3/f57a5c8c70cd1ead3d5d5f776f89d33110b1addae0ab010ad774d9a44fb9/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2f05983daecab868a31e1da44462873306d3cbfd76d1f0b5b69c473d21dbb128", size = 229131, upload-time = "2025-10-06T05:37:18.221Z" }, + { url = "https://files.pythonhosted.org/packages/6c/52/232476fe9cb64f0742f3fde2b7d26c1dac18b6d62071c74d4ded55e0ef94/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:33f48f51a446114bc5d251fb2954ab0164d5be02ad3382abcbfe07e2531d650f", size = 240542, upload-time = "2025-10-06T05:37:19.771Z" }, + { url = "https://files.pythonhosted.org/packages/5f/85/07bf3f5d0fb5414aee5f47d33c6f5c77bfe49aac680bfece33d4fdf6a246/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:154e55ec0655291b5dd1b8731c637ecdb50975a2ae70c606d100750a540082f7", size = 237308, upload-time = "2025-10-06T05:37:20.969Z" }, + { url = "https://files.pythonhosted.org/packages/11/99/ae3a33d5befd41ac0ca2cc7fd3aa707c9c324de2e89db0e0f45db9a64c26/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:4314debad13beb564b708b4a496020e5306c7333fa9a3ab90374169a20ffab30", size = 238210, upload-time = "2025-10-06T05:37:22.252Z" }, + { url = "https://files.pythonhosted.org/packages/b2/60/b1d2da22f4970e7a155f0adde9b1435712ece01b3cd45ba63702aea33938/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:073f8bf8becba60aa931eb3bc420b217bb7d5b8f4750e6f8b3be7f3da85d38b7", size = 231972, upload-time = "2025-10-06T05:37:23.5Z" }, + { url = "https://files.pythonhosted.org/packages/3f/ab/945b2f32de889993b9c9133216c068b7fcf257d8595a0ac420ac8677cab0/frozenlist-1.8.0-cp314-cp314-win32.whl", hash = "sha256:bac9c42ba2ac65ddc115d930c78d24ab8d4f465fd3fc473cdedfccadb9429806", size = 40536, upload-time = "2025-10-06T05:37:25.581Z" }, + { url = "https://files.pythonhosted.org/packages/59/ad/9caa9b9c836d9ad6f067157a531ac48b7d36499f5036d4141ce78c230b1b/frozenlist-1.8.0-cp314-cp314-win_amd64.whl", hash = "sha256:3e0761f4d1a44f1d1a47996511752cf3dcec5bbdd9cc2b4fe595caf97754b7a0", size = 44330, upload-time = "2025-10-06T05:37:26.928Z" }, + { url = "https://files.pythonhosted.org/packages/82/13/e6950121764f2676f43534c555249f57030150260aee9dcf7d64efda11dd/frozenlist-1.8.0-cp314-cp314-win_arm64.whl", hash = "sha256:d1eaff1d00c7751b7c6662e9c5ba6eb2c17a2306ba5e2a37f24ddf3cc953402b", size = 40627, upload-time = "2025-10-06T05:37:28.075Z" }, + { url = "https://files.pythonhosted.org/packages/c0/c7/43200656ecc4e02d3f8bc248df68256cd9572b3f0017f0a0c4e93440ae23/frozenlist-1.8.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = 
"sha256:d3bb933317c52d7ea5004a1c442eef86f426886fba134ef8cf4226ea6ee1821d", size = 89238, upload-time = "2025-10-06T05:37:29.373Z" }, + { url = "https://files.pythonhosted.org/packages/d1/29/55c5f0689b9c0fb765055629f472c0de484dcaf0acee2f7707266ae3583c/frozenlist-1.8.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:8009897cdef112072f93a0efdce29cd819e717fd2f649ee3016efd3cd885a7ed", size = 50738, upload-time = "2025-10-06T05:37:30.792Z" }, + { url = "https://files.pythonhosted.org/packages/ba/7d/b7282a445956506fa11da8c2db7d276adcbf2b17d8bb8407a47685263f90/frozenlist-1.8.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2c5dcbbc55383e5883246d11fd179782a9d07a986c40f49abe89ddf865913930", size = 51739, upload-time = "2025-10-06T05:37:32.127Z" }, + { url = "https://files.pythonhosted.org/packages/62/1c/3d8622e60d0b767a5510d1d3cf21065b9db874696a51ea6d7a43180a259c/frozenlist-1.8.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:39ecbc32f1390387d2aa4f5a995e465e9e2f79ba3adcac92d68e3e0afae6657c", size = 284186, upload-time = "2025-10-06T05:37:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/2d/14/aa36d5f85a89679a85a1d44cd7a6657e0b1c75f61e7cad987b203d2daca8/frozenlist-1.8.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92db2bf818d5cc8d9c1f1fc56b897662e24ea5adb36ad1f1d82875bd64e03c24", size = 292196, upload-time = "2025-10-06T05:37:36.107Z" }, + { url = "https://files.pythonhosted.org/packages/05/23/6bde59eb55abd407d34f77d39a5126fb7b4f109a3f611d3929f14b700c66/frozenlist-1.8.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2dc43a022e555de94c3b68a4ef0b11c4f747d12c024a520c7101709a2144fb37", size = 273830, upload-time = "2025-10-06T05:37:37.663Z" }, + { url = "https://files.pythonhosted.org/packages/d2/3f/22cff331bfad7a8afa616289000ba793347fcd7bc275f3b28ecea2a27909/frozenlist-1.8.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cb89a7f2de3602cfed448095bab3f178399646ab7c61454315089787df07733a", size = 294289, upload-time = "2025-10-06T05:37:39.261Z" }, + { url = "https://files.pythonhosted.org/packages/a4/89/5b057c799de4838b6c69aa82b79705f2027615e01be996d2486a69ca99c4/frozenlist-1.8.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:33139dc858c580ea50e7e60a1b0ea003efa1fd42e6ec7fdbad78fff65fad2fd2", size = 300318, upload-time = "2025-10-06T05:37:43.213Z" }, + { url = "https://files.pythonhosted.org/packages/30/de/2c22ab3eb2a8af6d69dc799e48455813bab3690c760de58e1bf43b36da3e/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:168c0969a329b416119507ba30b9ea13688fafffac1b7822802537569a1cb0ef", size = 282814, upload-time = "2025-10-06T05:37:45.337Z" }, + { url = "https://files.pythonhosted.org/packages/59/f7/970141a6a8dbd7f556d94977858cfb36fa9b66e0892c6dd780d2219d8cd8/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:28bd570e8e189d7f7b001966435f9dac6718324b5be2990ac496cf1ea9ddb7fe", size = 291762, upload-time = "2025-10-06T05:37:46.657Z" }, + { url = "https://files.pythonhosted.org/packages/c1/15/ca1adae83a719f82df9116d66f5bb28bb95557b3951903d39135620ef157/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:b2a095d45c5d46e5e79ba1e5b9cb787f541a8dee0433836cea4b96a2c439dcd8", size = 289470, upload-time = "2025-10-06T05:37:47.946Z" }, + { url = 
"https://files.pythonhosted.org/packages/ac/83/dca6dc53bf657d371fbc88ddeb21b79891e747189c5de990b9dfff2ccba1/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:eab8145831a0d56ec9c4139b6c3e594c7a83c2c8be25d5bcf2d86136a532287a", size = 289042, upload-time = "2025-10-06T05:37:49.499Z" }, + { url = "https://files.pythonhosted.org/packages/96/52/abddd34ca99be142f354398700536c5bd315880ed0a213812bc491cff5e4/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:974b28cf63cc99dfb2188d8d222bc6843656188164848c4f679e63dae4b0708e", size = 283148, upload-time = "2025-10-06T05:37:50.745Z" }, + { url = "https://files.pythonhosted.org/packages/af/d3/76bd4ed4317e7119c2b7f57c3f6934aba26d277acc6309f873341640e21f/frozenlist-1.8.0-cp314-cp314t-win32.whl", hash = "sha256:342c97bf697ac5480c0a7ec73cd700ecfa5a8a40ac923bd035484616efecc2df", size = 44676, upload-time = "2025-10-06T05:37:52.222Z" }, + { url = "https://files.pythonhosted.org/packages/89/76/c615883b7b521ead2944bb3480398cbb07e12b7b4e4d073d3752eb721558/frozenlist-1.8.0-cp314-cp314t-win_amd64.whl", hash = "sha256:06be8f67f39c8b1dc671f5d83aaefd3358ae5cdcf8314552c57e7ed3e6475bdd", size = 49451, upload-time = "2025-10-06T05:37:53.425Z" }, + { url = "https://files.pythonhosted.org/packages/e0/a3/5982da14e113d07b325230f95060e2169f5311b1017ea8af2a29b374c289/frozenlist-1.8.0-cp314-cp314t-win_arm64.whl", hash = "sha256:102e6314ca4da683dca92e3b1355490fed5f313b768500084fbe6371fddfdb79", size = 42507, upload-time = "2025-10-06T05:37:54.513Z" }, + { url = "https://files.pythonhosted.org/packages/9a/9a/e35b4a917281c0b8419d4207f4334c8e8c5dbf4f3f5f9ada73958d937dcc/frozenlist-1.8.0-py3-none-any.whl", hash = "sha256:0c18a16eab41e82c295618a77502e17b195883241c563b00f0aa5106fc4eaa0d", size = 13409, upload-time = "2025-10-06T05:38:16.721Z" }, +] + +[[package]] +name = "genson" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c5/cf/2303c8ad276dcf5ee2ad6cf69c4338fd86ef0f471a5207b069adf7a393cf/genson-1.3.0.tar.gz", hash = "sha256:e02db9ac2e3fd29e65b5286f7135762e2cd8a986537c075b06fc5f1517308e37", size = 34919, upload-time = "2024-05-15T22:08:49.123Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f8/5c/e226de133afd8bb267ec27eead9ae3d784b95b39a287ed404caab39a5f50/genson-1.3.0-py3-none-any.whl", hash = "sha256:468feccd00274cc7e4c09e84b08704270ba8d95232aa280f65b986139cec67f7", size = 21470, upload-time = "2024-05-15T22:08:47.056Z" }, +] + +[[package]] +name = "gevent" +version = "25.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation == 'CPython' and sys_platform == 'win32'" }, + { name = "greenlet", marker = "platform_python_implementation == 'CPython'" }, + { name = "zope-event" }, + { name = "zope-interface" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/48/b3ef2673ffb940f980966694e40d6d32560f3ffa284ecaeb5ea3a90a6d3f/gevent-25.9.1.tar.gz", hash = "sha256:adf9cd552de44a4e6754c51ff2e78d9193b7fa6eab123db9578a210e657235dd", size = 5059025, upload-time = "2025-09-17T16:15:34.528Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f7/49/e55930ba5259629eb28ac7ee1abbca971996a9165f902f0249b561602f24/gevent-25.9.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:46b188248c84ffdec18a686fcac5dbb32365d76912e14fda350db5dc0bfd4f86", size = 2955991, upload-time = "2025-09-17T14:52:30.568Z" }, + { url = 
"https://files.pythonhosted.org/packages/aa/88/63dc9e903980e1da1e16541ec5c70f2b224ec0a8e34088cb42794f1c7f52/gevent-25.9.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f2b54ea3ca6f0c763281cd3f96010ac7e98c2e267feb1221b5a26e2ca0b9a692", size = 1808503, upload-time = "2025-09-17T15:41:25.59Z" }, + { url = "https://files.pythonhosted.org/packages/7a/8d/7236c3a8f6ef7e94c22e658397009596fa90f24c7d19da11ad7ab3a9248e/gevent-25.9.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:7a834804ac00ed8a92a69d3826342c677be651b1c3cd66cc35df8bc711057aa2", size = 1890001, upload-time = "2025-09-17T15:49:01.227Z" }, + { url = "https://files.pythonhosted.org/packages/4f/63/0d7f38c4a2085ecce26b50492fc6161aa67250d381e26d6a7322c309b00f/gevent-25.9.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:323a27192ec4da6b22a9e51c3d9d896ff20bc53fdc9e45e56eaab76d1c39dd74", size = 1855335, upload-time = "2025-09-17T15:49:20.582Z" }, + { url = "https://files.pythonhosted.org/packages/95/18/da5211dfc54c7a57e7432fd9a6ffeae1ce36fe5a313fa782b1c96529ea3d/gevent-25.9.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6ea78b39a2c51d47ff0f130f4c755a9a4bbb2dd9721149420ad4712743911a51", size = 2109046, upload-time = "2025-09-17T15:15:13.817Z" }, + { url = "https://files.pythonhosted.org/packages/a6/5a/7bb5ec8e43a2c6444853c4a9f955f3e72f479d7c24ea86c95fb264a2de65/gevent-25.9.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:dc45cd3e1cc07514a419960af932a62eb8515552ed004e56755e4bf20bad30c5", size = 1827099, upload-time = "2025-09-17T15:52:41.384Z" }, + { url = "https://files.pythonhosted.org/packages/ca/d4/b63a0a60635470d7d986ef19897e893c15326dd69e8fb342c76a4f07fe9e/gevent-25.9.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:34e01e50c71eaf67e92c186ee0196a039d6e4f4b35670396baed4a2d8f1b347f", size = 2172623, upload-time = "2025-09-17T15:24:12.03Z" }, + { url = "https://files.pythonhosted.org/packages/d5/98/caf06d5d22a7c129c1fb2fc1477306902a2c8ddfd399cd26bbbd4caf2141/gevent-25.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:4acd6bcd5feabf22c7c5174bd3b9535ee9f088d2bbce789f740ad8d6554b18f3", size = 1682837, upload-time = "2025-09-17T19:48:47.318Z" }, + { url = "https://files.pythonhosted.org/packages/5a/77/b97f086388f87f8ad3e01364f845004aef0123d4430241c7c9b1f9bde742/gevent-25.9.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:4f84591d13845ee31c13f44bdf6bd6c3dbf385b5af98b2f25ec328213775f2ed", size = 2973739, upload-time = "2025-09-17T14:53:30.279Z" }, + { url = "https://files.pythonhosted.org/packages/3c/2e/9d5f204ead343e5b27bbb2fedaec7cd0009d50696b2266f590ae845d0331/gevent-25.9.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9cdbb24c276a2d0110ad5c978e49daf620b153719ac8a548ce1250a7eb1b9245", size = 1809165, upload-time = "2025-09-17T15:41:27.193Z" }, + { url = "https://files.pythonhosted.org/packages/10/3e/791d1bf1eb47748606d5f2c2aa66571f474d63e0176228b1f1fd7b77ab37/gevent-25.9.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:88b6c07169468af631dcf0fdd3658f9246d6822cc51461d43f7c44f28b0abb82", size = 1890638, upload-time = "2025-09-17T15:49:02.45Z" }, + { url = "https://files.pythonhosted.org/packages/f2/5c/9ad0229b2b4d81249ca41e4f91dd8057deaa0da6d4fbe40bf13cdc5f7a47/gevent-25.9.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b7bb0e29a7b3e6ca9bed2394aa820244069982c36dc30b70eb1004dd67851a48", size = 1857118, upload-time = 
"2025-09-17T15:49:22.125Z" }, + { url = "https://files.pythonhosted.org/packages/49/2a/3010ed6c44179a3a5c5c152e6de43a30ff8bc2c8de3115ad8733533a018f/gevent-25.9.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2951bb070c0ee37b632ac9134e4fdaad70d2e660c931bb792983a0837fe5b7d7", size = 2111598, upload-time = "2025-09-17T15:15:15.226Z" }, + { url = "https://files.pythonhosted.org/packages/08/75/6bbe57c19a7aa4527cc0f9afcdf5a5f2aed2603b08aadbccb5bf7f607ff4/gevent-25.9.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e4e17c2d57e9a42e25f2a73d297b22b60b2470a74be5a515b36c984e1a246d47", size = 1829059, upload-time = "2025-09-17T15:52:42.596Z" }, + { url = "https://files.pythonhosted.org/packages/06/6e/19a9bee9092be45679cb69e4dd2e0bf5f897b7140b4b39c57cc123d24829/gevent-25.9.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d94936f8f8b23d9de2251798fcb603b84f083fdf0d7f427183c1828fb64f117", size = 2173529, upload-time = "2025-09-17T15:24:13.897Z" }, + { url = "https://files.pythonhosted.org/packages/ca/4f/50de9afd879440e25737e63f5ba6ee764b75a3abe17376496ab57f432546/gevent-25.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:eb51c5f9537b07da673258b4832f6635014fee31690c3f0944d34741b69f92fa", size = 1681518, upload-time = "2025-09-17T19:39:47.488Z" }, + { url = "https://files.pythonhosted.org/packages/15/1a/948f8167b2cdce573cf01cec07afc64d0456dc134b07900b26ac7018b37e/gevent-25.9.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:1a3fe4ea1c312dbf6b375b416925036fe79a40054e6bf6248ee46526ea628be1", size = 2982934, upload-time = "2025-09-17T14:54:11.302Z" }, + { url = "https://files.pythonhosted.org/packages/9b/ec/726b146d1d3aad82e03d2e1e1507048ab6072f906e83f97f40667866e582/gevent-25.9.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0adb937f13e5fb90cca2edf66d8d7e99d62a299687400ce2edee3f3504009356", size = 1813982, upload-time = "2025-09-17T15:41:28.506Z" }, + { url = "https://files.pythonhosted.org/packages/35/5d/5f83f17162301662bd1ce702f8a736a8a8cac7b7a35e1d8b9866938d1f9d/gevent-25.9.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:427f869a2050a4202d93cf7fd6ab5cffb06d3e9113c10c967b6e2a0d45237cb8", size = 1894902, upload-time = "2025-09-17T15:49:03.702Z" }, + { url = "https://files.pythonhosted.org/packages/83/cd/cf5e74e353f60dab357829069ffc300a7bb414c761f52cf8c0c6e9728b8d/gevent-25.9.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c049880175e8c93124188f9d926af0a62826a3b81aa6d3074928345f8238279e", size = 1861792, upload-time = "2025-09-17T15:49:23.279Z" }, + { url = "https://files.pythonhosted.org/packages/dd/65/b9a4526d4a4edce26fe4b3b993914ec9dc64baabad625a3101e51adb17f3/gevent-25.9.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b5a67a0974ad9f24721034d1e008856111e0535f1541499f72a733a73d658d1c", size = 2113215, upload-time = "2025-09-17T15:15:16.34Z" }, + { url = "https://files.pythonhosted.org/packages/e5/be/7d35731dfaf8370795b606e515d964a0967e129db76ea7873f552045dd39/gevent-25.9.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1d0f5d8d73f97e24ea8d24d8be0f51e0cf7c54b8021c1fddb580bf239474690f", size = 1833449, upload-time = "2025-09-17T15:52:43.75Z" }, + { url = "https://files.pythonhosted.org/packages/65/58/7bc52544ea5e63af88c4a26c90776feb42551b7555a1c89c20069c168a3f/gevent-25.9.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:ddd3ff26e5c4240d3fbf5516c2d9d5f2a998ef87cfb73e1429cfaeaaec860fa6", size = 2176034, upload-time = 
"2025-09-17T15:24:15.676Z" }, + { url = "https://files.pythonhosted.org/packages/c2/69/a7c4ba2ffbc7c7dbf6d8b4f5d0f0a421f7815d229f4909854266c445a3d4/gevent-25.9.1-cp314-cp314-win_amd64.whl", hash = "sha256:bb63c0d6cb9950cc94036a4995b9cc4667b8915366613449236970f4394f94d7", size = 1703019, upload-time = "2025-09-17T19:30:55.272Z" }, +] + +[[package]] +name = "ghp-import" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "python-dateutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d9/29/d40217cbe2f6b1359e00c6c307bb3fc876ba74068cbab3dde77f03ca0dc4/ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343", size = 10943, upload-time = "2022-05-02T15:47:16.11Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f7/ec/67fbef5d497f86283db54c22eec6f6140243aae73265799baaaa19cd17fb/ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619", size = 11034, upload-time = "2022-05-02T15:47:14.552Z" }, +] + +[[package]] +name = "gitdb" +version = "4.0.12" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "smmap" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/94/63b0fc47eb32792c7ba1fe1b694daec9a63620db1e313033d18140c2320a/gitdb-4.0.12.tar.gz", hash = "sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571", size = 394684, upload-time = "2025-01-02T07:20:46.413Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/61/5c78b91c3143ed5c14207f463aecfc8f9dbb5092fb2869baf37c273b2705/gitdb-4.0.12-py3-none-any.whl", hash = "sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf", size = 62794, upload-time = "2025-01-02T07:20:43.624Z" }, +] + +[[package]] +name = "gitpython" +version = "3.1.45" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "gitdb" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/c8/dd58967d119baab745caec2f9d853297cec1989ec1d63f677d3880632b88/gitpython-3.1.45.tar.gz", hash = "sha256:85b0ee964ceddf211c41b9f27a49086010a190fd8132a24e21f362a4b36a791c", size = 215076, upload-time = "2025-07-24T03:45:54.871Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/61/d4b89fec821f72385526e1b9d9a3a0385dda4a72b206d28049e2c7cd39b8/gitpython-3.1.45-py3-none-any.whl", hash = "sha256:8908cb2e02fb3b93b7eb0f2827125cb699869470432cc885f019b8fd0fccff77", size = 208168, upload-time = "2025-07-24T03:45:52.517Z" }, +] + +[[package]] +name = "googleapis-common-protos" +version = "1.72.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/7b/adfd75544c415c487b33061fe7ae526165241c1ea133f9a9125a56b39fd8/googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5", size = 147433, upload-time = "2025-11-06T18:29:24.087Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" }, +] + +[[package]] +name = "greenlet" +version = "3.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/c7/e5/40dbda2736893e3e53d25838e0f19a2b417dfc122b9989c91918db30b5d3/greenlet-3.3.0.tar.gz", hash = "sha256:a82bb225a4e9e4d653dd2fb7b8b2d36e4fb25bc0165422a11e48b88e9e6f78fb", size = 190651, upload-time = "2025-12-04T14:49:44.05Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f8/0a/a3871375c7b9727edaeeea994bfff7c63ff7804c9829c19309ba2e058807/greenlet-3.3.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:b01548f6e0b9e9784a2c99c5651e5dc89ffcbe870bc5fb2e5ef864e9cc6b5dcb", size = 276379, upload-time = "2025-12-04T14:23:30.498Z" }, + { url = "https://files.pythonhosted.org/packages/43/ab/7ebfe34dce8b87be0d11dae91acbf76f7b8246bf9d6b319c741f99fa59c6/greenlet-3.3.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:349345b770dc88f81506c6861d22a6ccd422207829d2c854ae2af8025af303e3", size = 597294, upload-time = "2025-12-04T14:50:06.847Z" }, + { url = "https://files.pythonhosted.org/packages/a4/39/f1c8da50024feecd0793dbd5e08f526809b8ab5609224a2da40aad3a7641/greenlet-3.3.0-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e8e18ed6995e9e2c0b4ed264d2cf89260ab3ac7e13555b8032b25a74c6d18655", size = 607742, upload-time = "2025-12-04T14:57:42.349Z" }, + { url = "https://files.pythonhosted.org/packages/77/cb/43692bcd5f7a0da6ec0ec6d58ee7cddb606d055ce94a62ac9b1aa481e969/greenlet-3.3.0-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c024b1e5696626890038e34f76140ed1daf858e37496d33f2af57f06189e70d7", size = 622297, upload-time = "2025-12-04T15:07:13.552Z" }, + { url = "https://files.pythonhosted.org/packages/75/b0/6bde0b1011a60782108c01de5913c588cf51a839174538d266de15e4bf4d/greenlet-3.3.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:047ab3df20ede6a57c35c14bf5200fcf04039d50f908270d3f9a7a82064f543b", size = 609885, upload-time = "2025-12-04T14:26:02.368Z" }, + { url = "https://files.pythonhosted.org/packages/49/0e/49b46ac39f931f59f987b7cd9f34bfec8ef81d2a1e6e00682f55be5de9f4/greenlet-3.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2d9ad37fc657b1102ec880e637cccf20191581f75c64087a549e66c57e1ceb53", size = 1567424, upload-time = "2025-12-04T15:04:23.757Z" }, + { url = "https://files.pythonhosted.org/packages/05/f5/49a9ac2dff7f10091935def9165c90236d8f175afb27cbed38fb1d61ab6b/greenlet-3.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:83cd0e36932e0e7f36a64b732a6f60c2fc2df28c351bae79fbaf4f8092fe7614", size = 1636017, upload-time = "2025-12-04T14:27:29.688Z" }, + { url = "https://files.pythonhosted.org/packages/6c/79/3912a94cf27ec503e51ba493692d6db1e3cd8ac7ac52b0b47c8e33d7f4f9/greenlet-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a7a34b13d43a6b78abf828a6d0e87d3385680eaf830cd60d20d52f249faabf39", size = 301964, upload-time = "2025-12-04T14:36:58.316Z" }, + { url = "https://files.pythonhosted.org/packages/02/2f/28592176381b9ab2cafa12829ba7b472d177f3acc35d8fbcf3673d966fff/greenlet-3.3.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:a1e41a81c7e2825822f4e068c48cb2196002362619e2d70b148f20a831c00739", size = 275140, upload-time = "2025-12-04T14:23:01.282Z" }, + { url = "https://files.pythonhosted.org/packages/2c/80/fbe937bf81e9fca98c981fe499e59a3f45df2a04da0baa5c2be0dca0d329/greenlet-3.3.0-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9f515a47d02da4d30caaa85b69474cec77b7929b2e936ff7fb853d42f4bf8808", size = 599219, upload-time = "2025-12-04T14:50:08.309Z" }, + { url = 
"https://files.pythonhosted.org/packages/c2/ff/7c985128f0514271b8268476af89aee6866df5eec04ac17dcfbc676213df/greenlet-3.3.0-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7d2d9fd66bfadf230b385fdc90426fcd6eb64db54b40c495b72ac0feb5766c54", size = 610211, upload-time = "2025-12-04T14:57:43.968Z" }, + { url = "https://files.pythonhosted.org/packages/79/07/c47a82d881319ec18a4510bb30463ed6891f2ad2c1901ed5ec23d3de351f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30a6e28487a790417d036088b3bcb3f3ac7d8babaa7d0139edbaddebf3af9492", size = 624311, upload-time = "2025-12-04T15:07:14.697Z" }, + { url = "https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:087ea5e004437321508a8d6f20efc4cfec5e3c30118e1417ea96ed1d93950527", size = 612833, upload-time = "2025-12-04T14:26:03.669Z" }, + { url = "https://files.pythonhosted.org/packages/b5/ba/56699ff9b7c76ca12f1cdc27a886d0f81f2189c3455ff9f65246780f713d/greenlet-3.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ab97cf74045343f6c60a39913fa59710e4bd26a536ce7ab2397adf8b27e67c39", size = 1567256, upload-time = "2025-12-04T15:04:25.276Z" }, + { url = "https://files.pythonhosted.org/packages/1e/37/f31136132967982d698c71a281a8901daf1a8fbab935dce7c0cf15f942cc/greenlet-3.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5375d2e23184629112ca1ea89a53389dddbffcf417dad40125713d88eb5f96e8", size = 1636483, upload-time = "2025-12-04T14:27:30.804Z" }, + { url = "https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:9ee1942ea19550094033c35d25d20726e4f1c40d59545815e1128ac58d416d38", size = 301833, upload-time = "2025-12-04T14:32:23.929Z" }, + { url = "https://files.pythonhosted.org/packages/d7/7c/f0a6d0ede2c7bf092d00bc83ad5bafb7e6ec9b4aab2fbdfa6f134dc73327/greenlet-3.3.0-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:60c2ef0f578afb3c8d92ea07ad327f9a062547137afe91f38408f08aacab667f", size = 275671, upload-time = "2025-12-04T14:23:05.267Z" }, + { url = "https://files.pythonhosted.org/packages/44/06/dac639ae1a50f5969d82d2e3dd9767d30d6dbdbab0e1a54010c8fe90263c/greenlet-3.3.0-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a5d554d0712ba1de0a6c94c640f7aeba3f85b3a6e1f2899c11c2c0428da9365", size = 646360, upload-time = "2025-12-04T14:50:10.026Z" }, + { url = "https://files.pythonhosted.org/packages/e0/94/0fb76fe6c5369fba9bf98529ada6f4c3a1adf19e406a47332245ef0eb357/greenlet-3.3.0-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3a898b1e9c5f7307ebbde4102908e6cbfcb9ea16284a3abe15cab996bee8b9b3", size = 658160, upload-time = "2025-12-04T14:57:45.41Z" }, + { url = "https://files.pythonhosted.org/packages/93/79/d2c70cae6e823fac36c3bbc9077962105052b7ef81db2f01ec3b9bf17e2b/greenlet-3.3.0-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:dcd2bdbd444ff340e8d6bdf54d2f206ccddbb3ccfdcd3c25bf4afaa7b8f0cf45", size = 671388, upload-time = "2025-12-04T15:07:15.789Z" }, + { url = "https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5773edda4dc00e173820722711d043799d3adb4f01731f40619e07ea2750b955", size = 660166, 
upload-time = "2025-12-04T14:26:05.099Z" }, + { url = "https://files.pythonhosted.org/packages/4b/d2/91465d39164eaa0085177f61983d80ffe746c5a1860f009811d498e7259c/greenlet-3.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ac0549373982b36d5fd5d30beb8a7a33ee541ff98d2b502714a09f1169f31b55", size = 1615193, upload-time = "2025-12-04T15:04:27.041Z" }, + { url = "https://files.pythonhosted.org/packages/42/1b/83d110a37044b92423084d52d5d5a3b3a73cafb51b547e6d7366ff62eff1/greenlet-3.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d198d2d977460358c3b3a4dc844f875d1adb33817f0613f663a656f463764ccc", size = 1683653, upload-time = "2025-12-04T14:27:32.366Z" }, + { url = "https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:73f51dd0e0bdb596fb0417e475fa3c5e32d4c83638296e560086b8d7da7c4170", size = 305387, upload-time = "2025-12-04T14:26:51.063Z" }, + { url = "https://files.pythonhosted.org/packages/a0/66/bd6317bc5932accf351fc19f177ffba53712a202f9df10587da8df257c7e/greenlet-3.3.0-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:d6ed6f85fae6cdfdb9ce04c9bf7a08d666cfcfb914e7d006f44f840b46741931", size = 282638, upload-time = "2025-12-04T14:25:20.941Z" }, + { url = "https://files.pythonhosted.org/packages/30/cf/cc81cb030b40e738d6e69502ccbd0dd1bced0588e958f9e757945de24404/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d9125050fcf24554e69c4cacb086b87b3b55dc395a8b3ebe6487b045b2614388", size = 651145, upload-time = "2025-12-04T14:50:11.039Z" }, + { url = "https://files.pythonhosted.org/packages/9c/ea/1020037b5ecfe95ca7df8d8549959baceb8186031da83d5ecceff8b08cd2/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:87e63ccfa13c0a0f6234ed0add552af24cc67dd886731f2261e46e241608bee3", size = 654236, upload-time = "2025-12-04T14:57:47.007Z" }, + { url = "https://files.pythonhosted.org/packages/69/cc/1e4bae2e45ca2fa55299f4e85854606a78ecc37fead20d69322f96000504/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2662433acbca297c9153a4023fe2161c8dcfdcc91f10433171cf7e7d94ba2221", size = 662506, upload-time = "2025-12-04T15:07:16.906Z" }, + { url = "https://files.pythonhosted.org/packages/57/b9/f8025d71a6085c441a7eaff0fd928bbb275a6633773667023d19179fe815/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3c6e9b9c1527a78520357de498b0e709fb9e2f49c3a513afd5a249007261911b", size = 653783, upload-time = "2025-12-04T14:26:06.225Z" }, + { url = "https://files.pythonhosted.org/packages/f6/c7/876a8c7a7485d5d6b5c6821201d542ef28be645aa024cfe1145b35c120c1/greenlet-3.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:286d093f95ec98fdd92fcb955003b8a3d054b4e2cab3e2707a5039e7b50520fd", size = 1614857, upload-time = "2025-12-04T15:04:28.484Z" }, + { url = "https://files.pythonhosted.org/packages/4f/dc/041be1dff9f23dac5f48a43323cd0789cb798342011c19a248d9c9335536/greenlet-3.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c10513330af5b8ae16f023e8ddbfb486ab355d04467c4679c5cfe4659975dd9", size = 1676034, upload-time = "2025-12-04T14:27:33.531Z" }, +] + +[[package]] +name = "griffe" +version = "1.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/0d/0c/3a471b6e31951dce2360477420d0a8d1e00dea6cf33b70f3e8c3ab6e28e1/griffe-1.15.0.tar.gz", hash = "sha256:7726e3afd6f298fbc3696e67958803e7ac843c1cfe59734b6251a40cdbfb5eea", size = 424112, upload-time = "2025-11-10T15:03:15.52Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9c/83/3b1d03d36f224edded98e9affd0467630fc09d766c0e56fb1498cbb04a9b/griffe-1.15.0-py3-none-any.whl", hash = "sha256:6f6762661949411031f5fcda9593f586e6ce8340f0ba88921a0f2ef7a81eb9a3", size = 150705, upload-time = "2025-11-10T15:03:13.549Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "haidra-core" +version = "0.0.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "pydantic-settings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/5f/71dcaf8bbf1dffbbaa147d14a20138df0963ad25cfc11eee7c78ba90542d/haidra_core-0.0.5.tar.gz", hash = "sha256:a6035f18f3c07e0b3f2452a567f05b6f6b9be8d75835a40931b4c4f3c20ad02b", size = 80165, upload-time = "2025-08-21T15:46:40.331Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/24/9eb60184d1996d9ddbe592b9642db2dfd99b591cd12ae16eb04af6458fdb/haidra_core-0.0.5-py3-none-any.whl", hash = "sha256:ea4f089b71cfe78620616a74063c3393ab827dd7a0d23418ff99f115b006e8b8", size = 29189, upload-time = "2025-08-21T15:46:39.295Z" }, +] + +[[package]] +name = "hiredis" +version = "3.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/65/82/d2817ce0653628e0a0cb128533f6af0dd6318a49f3f3a6a7bd1f2f2154af/hiredis-3.3.0.tar.gz", hash = "sha256:105596aad9249634361815c574351f1bd50455dc23b537c2940066c4a9dea685", size = 89048, upload-time = "2025-10-14T16:33:34.263Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/1c/ed28ae5d704f5c7e85b946fa327f30d269e6272c847fef7e91ba5fc86193/hiredis-3.3.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:5b8e1d6a2277ec5b82af5dce11534d3ed5dffeb131fd9b210bc1940643b39b5f", size = 82026, upload-time = "2025-10-14T16:32:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/f4/9b/79f30c5c40e248291023b7412bfdef4ad9a8a92d9e9285d65d600817dac7/hiredis-3.3.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:c4981de4d335f996822419e8a8b3b87367fcef67dc5fb74d3bff4df9f6f17783", size = 46217, upload-time = "2025-10-14T16:32:13.133Z" }, + { url = "https://files.pythonhosted.org/packages/e7/c3/02b9ed430ad9087aadd8afcdf616717452d16271b701fa47edfe257b681e/hiredis-3.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1706480a683e328ae9ba5d704629dee2298e75016aa0207e7067b9c40cecc271", size = 41858, upload-time = "2025-10-14T16:32:13.98Z" }, + { url = 
"https://files.pythonhosted.org/packages/f1/98/b2a42878b82130a535c7aa20bc937ba2d07d72e9af3ad1ad93e837c419b5/hiredis-3.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a95cef9989736ac313639f8f545b76b60b797e44e65834aabbb54e4fad8d6c8", size = 170195, upload-time = "2025-10-14T16:32:14.728Z" }, + { url = "https://files.pythonhosted.org/packages/66/1d/9dcde7a75115d3601b016113d9b90300726fa8e48aacdd11bf01a453c145/hiredis-3.3.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ca2802934557ccc28a954414c245ba7ad904718e9712cb67c05152cf6b9dd0a3", size = 181808, upload-time = "2025-10-14T16:32:15.622Z" }, + { url = "https://files.pythonhosted.org/packages/56/a1/60f6bda9b20b4e73c85f7f5f046bc2c154a5194fc94eb6861e1fd97ced52/hiredis-3.3.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:fe730716775f61e76d75810a38ee4c349d3af3896450f1525f5a4034cf8f2ed7", size = 180578, upload-time = "2025-10-14T16:32:16.514Z" }, + { url = "https://files.pythonhosted.org/packages/d9/01/859d21de65085f323a701824e23ea3330a0ac05f8e184544d7aa5c26128d/hiredis-3.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:749faa69b1ce1f741f5eaf743435ac261a9262e2d2d66089192477e7708a9abc", size = 172508, upload-time = "2025-10-14T16:32:17.411Z" }, + { url = "https://files.pythonhosted.org/packages/99/a8/28fd526e554c80853d0fbf57ef2a3235f00e4ed34ce0e622e05d27d0f788/hiredis-3.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:95c9427f2ac3f1dd016a3da4e1161fa9d82f221346c8f3fdd6f3f77d4e28946c", size = 166341, upload-time = "2025-10-14T16:32:18.561Z" }, + { url = "https://files.pythonhosted.org/packages/f2/91/ded746b7d2914f557fbbf77be55e90d21f34ba758ae10db6591927c642c8/hiredis-3.3.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c863ee44fe7bff25e41f3a5105c936a63938b76299b802d758f40994ab340071", size = 176765, upload-time = "2025-10-14T16:32:19.491Z" }, + { url = "https://files.pythonhosted.org/packages/d6/4c/04aa46ff386532cb5f08ee495c2bf07303e93c0acf2fa13850e031347372/hiredis-3.3.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2213c7eb8ad5267434891f3241c7776e3bafd92b5933fc57d53d4456247dc542", size = 170312, upload-time = "2025-10-14T16:32:20.404Z" }, + { url = "https://files.pythonhosted.org/packages/90/6e/67f9d481c63f542a9cf4c9f0ea4e5717db0312fb6f37fb1f78f3a66de93c/hiredis-3.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a172bae3e2837d74530cd60b06b141005075db1b814d966755977c69bd882ce8", size = 167965, upload-time = "2025-10-14T16:32:21.259Z" }, + { url = "https://files.pythonhosted.org/packages/7a/df/dde65144d59c3c0d85e43255798f1fa0c48d413e668cfd92b3d9f87924ef/hiredis-3.3.0-cp312-cp312-win32.whl", hash = "sha256:cb91363b9fd6d41c80df9795e12fffbaf5c399819e6ae8120f414dedce6de068", size = 20533, upload-time = "2025-10-14T16:32:22.192Z" }, + { url = "https://files.pythonhosted.org/packages/f5/a9/55a4ac9c16fdf32e92e9e22c49f61affe5135e177ca19b014484e28950f7/hiredis-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:04ec150e95eea3de9ff8bac754978aa17b8bf30a86d4ab2689862020945396b0", size = 22379, upload-time = "2025-10-14T16:32:22.916Z" }, + { url = "https://files.pythonhosted.org/packages/6d/39/2b789ebadd1548ccb04a2c18fbc123746ad1a7e248b7f3f3cac618ca10a6/hiredis-3.3.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:b7048b4ec0d5dddc8ddd03da603de0c4b43ef2540bf6e4c54f47d23e3480a4fa", size = 82035, upload-time = 
"2025-10-14T16:32:23.715Z" }, + { url = "https://files.pythonhosted.org/packages/85/74/4066d9c1093be744158ede277f2a0a4e4cd0fefeaa525c79e2876e9e5c72/hiredis-3.3.0-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:e5f86ce5a779319c15567b79e0be806e8e92c18bb2ea9153e136312fafa4b7d6", size = 46219, upload-time = "2025-10-14T16:32:24.554Z" }, + { url = "https://files.pythonhosted.org/packages/fa/3f/f9e0f6d632f399d95b3635703e1558ffaa2de3aea4cfcbc2d7832606ba43/hiredis-3.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:fbdb97a942e66016fff034df48a7a184e2b7dc69f14c4acd20772e156f20d04b", size = 41860, upload-time = "2025-10-14T16:32:25.356Z" }, + { url = "https://files.pythonhosted.org/packages/4a/c5/b7dde5ec390dabd1cabe7b364a509c66d4e26de783b0b64cf1618f7149fc/hiredis-3.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b0fb4bea72fe45ff13e93ddd1352b43ff0749f9866263b5cca759a4c960c776f", size = 170094, upload-time = "2025-10-14T16:32:26.148Z" }, + { url = "https://files.pythonhosted.org/packages/3e/d6/7f05c08ee74d41613be466935688068e07f7b6c55266784b5ace7b35b766/hiredis-3.3.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:85b9baf98050e8f43c2826ab46aaf775090d608217baf7af7882596aef74e7f9", size = 181746, upload-time = "2025-10-14T16:32:27.844Z" }, + { url = "https://files.pythonhosted.org/packages/0e/d2/aaf9f8edab06fbf5b766e0cae3996324297c0516a91eb2ca3bd1959a0308/hiredis-3.3.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:69079fb0f0ebb61ba63340b9c4bce9388ad016092ca157e5772eb2818209d930", size = 180465, upload-time = "2025-10-14T16:32:29.185Z" }, + { url = "https://files.pythonhosted.org/packages/8d/1e/93ded8b9b484519b211fc71746a231af98c98928e3ebebb9086ed20bb1ad/hiredis-3.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c17f77b79031ea4b0967d30255d2ae6e7df0603ee2426ad3274067f406938236", size = 172419, upload-time = "2025-10-14T16:32:30.059Z" }, + { url = "https://files.pythonhosted.org/packages/68/13/02880458e02bbfcedcaabb8f7510f9dda1c89d7c1921b1bb28c22bb38cbf/hiredis-3.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45d14f745fc177bc05fc24bdf20e2b515e9a068d3d4cce90a0fb78d04c9c9d9a", size = 166400, upload-time = "2025-10-14T16:32:31.173Z" }, + { url = "https://files.pythonhosted.org/packages/11/60/896e03267670570f19f61dc65a2137fcb2b06e83ab0911d58eeec9f3cb88/hiredis-3.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ba063fdf1eff6377a0c409609cbe890389aefddfec109c2d20fcc19cfdafe9da", size = 176845, upload-time = "2025-10-14T16:32:32.12Z" }, + { url = "https://files.pythonhosted.org/packages/f1/90/a1d4bd0cdcf251fda72ac0bd932f547b48ad3420f89bb2ef91bf6a494534/hiredis-3.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:1799cc66353ad066bfdd410135c951959da9f16bcb757c845aab2f21fc4ef099", size = 170365, upload-time = "2025-10-14T16:32:33.035Z" }, + { url = "https://files.pythonhosted.org/packages/f1/9a/7c98f7bb76bdb4a6a6003cf8209721f083e65d2eed2b514f4a5514bda665/hiredis-3.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2cbf71a121996ffac82436b6153290815b746afb010cac19b3290a1644381b07", size = 168022, upload-time = "2025-10-14T16:32:34.81Z" }, + { url = "https://files.pythonhosted.org/packages/0d/ca/672ee658ffe9525558615d955b554ecd36aa185acd4431ccc9701c655c9b/hiredis-3.3.0-cp313-cp313-win32.whl", hash = 
"sha256:a7cbbc6026bf03659f0b25e94bbf6e64f6c8c22f7b4bc52fe569d041de274194", size = 20533, upload-time = "2025-10-14T16:32:35.7Z" }, + { url = "https://files.pythonhosted.org/packages/20/93/511fd94f6a7b6d72a4cf9c2b159bf3d780585a9a1dca52715dd463825299/hiredis-3.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:a8def89dd19d4e2e4482b7412d453dec4a5898954d9a210d7d05f60576cedef6", size = 22387, upload-time = "2025-10-14T16:32:36.441Z" }, + { url = "https://files.pythonhosted.org/packages/aa/b3/b948ee76a6b2bc7e45249861646f91f29704f743b52565cf64cee9c4658b/hiredis-3.3.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:c135bda87211f7af9e2fd4e046ab433c576cd17b69e639a0f5bb2eed5e0e71a9", size = 82105, upload-time = "2025-10-14T16:32:37.204Z" }, + { url = "https://files.pythonhosted.org/packages/a2/9b/4210f4ebfb3ab4ada964b8de08190f54cbac147198fb463cd3c111cc13e0/hiredis-3.3.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2f855c678230aed6fc29b962ce1cc67e5858a785ef3a3fd6b15dece0487a2e60", size = 46237, upload-time = "2025-10-14T16:32:38.07Z" }, + { url = "https://files.pythonhosted.org/packages/b3/7a/e38bfd7d04c05036b4ccc6f42b86b1032185cf6ae426e112a97551fece14/hiredis-3.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4059c78a930cbb33c391452ccce75b137d6f89e2eebf6273d75dafc5c2143c03", size = 41894, upload-time = "2025-10-14T16:32:38.929Z" }, + { url = "https://files.pythonhosted.org/packages/28/d3/eae43d9609c5d9a6effef0586ee47e13a0d84b44264b688d97a75cd17ee5/hiredis-3.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:334a3f1d14c253bb092e187736c3384203bd486b244e726319bbb3f7dffa4a20", size = 170486, upload-time = "2025-10-14T16:32:40.147Z" }, + { url = "https://files.pythonhosted.org/packages/c3/fd/34d664554880b27741ab2916d66207357563b1639e2648685f4c84cfb755/hiredis-3.3.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:fd137b147235447b3d067ec952c5b9b95ca54b71837e1b38dbb2ec03b89f24fc", size = 182031, upload-time = "2025-10-14T16:32:41.06Z" }, + { url = "https://files.pythonhosted.org/packages/08/a3/0c69fdde3f4155b9f7acc64ccffde46f312781469260061b3bbaa487fd34/hiredis-3.3.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8f88f4f2aceb73329ece86a1cb0794fdbc8e6d614cb5ca2d1023c9b7eb432db8", size = 180542, upload-time = "2025-10-14T16:32:42.993Z" }, + { url = "https://files.pythonhosted.org/packages/68/7a/ad5da4d7bc241e57c5b0c4fe95aa75d1f2116e6e6c51577394d773216e01/hiredis-3.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:550f4d1538822fc75ebf8cf63adc396b23d4958bdbbad424521f2c0e3dfcb169", size = 172353, upload-time = "2025-10-14T16:32:43.965Z" }, + { url = "https://files.pythonhosted.org/packages/4b/dc/c46eace64eb047a5b31acd5e4b0dc6d2f0390a4a3f6d507442d9efa570ad/hiredis-3.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:54b14211fbd5930fc696f6fcd1f1f364c660970d61af065a80e48a1fa5464dd6", size = 166435, upload-time = "2025-10-14T16:32:44.97Z" }, + { url = "https://files.pythonhosted.org/packages/4a/ac/ad13a714e27883a2e4113c980c94caf46b801b810de5622c40f8d3e8335f/hiredis-3.3.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:c9e96f63dbc489fc86f69951e9f83dadb9582271f64f6822c47dcffa6fac7e4a", size = 177218, upload-time = "2025-10-14T16:32:45.936Z" }, + { url = 
"https://files.pythonhosted.org/packages/c2/38/268fabd85b225271fe1ba82cb4a484fcc1bf922493ff2c74b400f1a6f339/hiredis-3.3.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:106e99885d46684d62ab3ec1d6b01573cc0e0083ac295b11aaa56870b536c7ec", size = 170477, upload-time = "2025-10-14T16:32:46.898Z" }, + { url = "https://files.pythonhosted.org/packages/20/6b/02bb8af810ea04247334ab7148acff7a61c08a8832830c6703f464be83a9/hiredis-3.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:087e2ef3206361281b1a658b5b4263572b6ba99465253e827796964208680459", size = 167915, upload-time = "2025-10-14T16:32:47.847Z" }, + { url = "https://files.pythonhosted.org/packages/83/94/901fa817e667b2e69957626395e6dee416e31609dca738f28e6b545ca6c2/hiredis-3.3.0-cp314-cp314-win32.whl", hash = "sha256:80638ebeab1cefda9420e9fedc7920e1ec7b4f0513a6b23d58c9d13c882f8065", size = 21165, upload-time = "2025-10-14T16:32:50.753Z" }, + { url = "https://files.pythonhosted.org/packages/b1/7e/4881b9c1d0b4cdaba11bd10e600e97863f977ea9d67c5988f7ec8cd363e5/hiredis-3.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a68aaf9ba024f4e28cf23df9196ff4e897bd7085872f3a30644dca07fa787816", size = 22996, upload-time = "2025-10-14T16:32:51.543Z" }, + { url = "https://files.pythonhosted.org/packages/a7/b6/d7e6c17da032665a954a89c1e6ee3bd12cb51cd78c37527842b03519981d/hiredis-3.3.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:f7f80442a32ce51ee5d89aeb5a84ee56189a0e0e875f1a57bbf8d462555ae48f", size = 83034, upload-time = "2025-10-14T16:32:52.395Z" }, + { url = "https://files.pythonhosted.org/packages/27/6c/6751b698060cdd1b2d8427702cff367c9ed7a1705bcf3792eb5b896f149b/hiredis-3.3.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:a1a67530da714954ed50579f4fe1ab0ddbac9c43643b1721c2cb226a50dde263", size = 46701, upload-time = "2025-10-14T16:32:53.572Z" }, + { url = "https://files.pythonhosted.org/packages/ce/8e/20a5cf2c83c7a7e08c76b9abab113f99f71cd57468a9c7909737ce6e9bf8/hiredis-3.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:616868352e47ab355559adca30f4f3859f9db895b4e7bc71e2323409a2add751", size = 42381, upload-time = "2025-10-14T16:32:54.762Z" }, + { url = "https://files.pythonhosted.org/packages/be/0a/547c29c06e8c9c337d0df3eec39da0cf1aad701daf8a9658dd37f25aca66/hiredis-3.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e799b79f3150083e9702fc37e6243c0bd47a443d6eae3f3077b0b3f510d6a145", size = 180313, upload-time = "2025-10-14T16:32:55.644Z" }, + { url = "https://files.pythonhosted.org/packages/89/8a/488de5469e3d0921a1c425045bf00e983d48b2111a90e47cf5769eaa536c/hiredis-3.3.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9ef1dfb0d2c92c3701655e2927e6bbe10c499aba632c7ea57b6392516df3864b", size = 190488, upload-time = "2025-10-14T16:32:56.649Z" }, + { url = "https://files.pythonhosted.org/packages/b5/59/8493edc3eb9ae0dbea2b2230c2041a52bc03e390b02ffa3ac0bca2af9aea/hiredis-3.3.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c290da6bc2a57e854c7da9956cd65013483ede935677e84560da3b848f253596", size = 189210, upload-time = "2025-10-14T16:32:57.759Z" }, + { url = "https://files.pythonhosted.org/packages/f0/de/8c9a653922057b32fb1e2546ecd43ef44c9aa1a7cf460c87cae507eb2bc7/hiredis-3.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd8c438d9e1728f0085bf9b3c9484d19ec31f41002311464e75b69550c32ffa8", size = 180972, upload-time = 
"2025-10-14T16:32:58.737Z" }, + { url = "https://files.pythonhosted.org/packages/e4/a3/51e6e6afaef2990986d685ca6e254ffbd191f1635a59b2d06c9e5d10c8a2/hiredis-3.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1bbc6b8a88bbe331e3ebf6685452cebca6dfe6d38a6d4efc5651d7e363ba28bd", size = 175315, upload-time = "2025-10-14T16:32:59.774Z" }, + { url = "https://files.pythonhosted.org/packages/96/54/e436312feb97601f70f8b39263b8da5ac4a5d18305ebdfb08ad7621f6119/hiredis-3.3.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:55d8c18fe9a05496c5c04e6eccc695169d89bf358dff964bcad95696958ec05f", size = 185653, upload-time = "2025-10-14T16:33:00.749Z" }, + { url = "https://files.pythonhosted.org/packages/ed/a3/88e66030d066337c6c0f883a912c6d4b2d6d7173490fbbc113a6cbe414ff/hiredis-3.3.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:4ddc79afa76b805d364e202a754666cb3c4d9c85153cbfed522871ff55827838", size = 179032, upload-time = "2025-10-14T16:33:01.711Z" }, + { url = "https://files.pythonhosted.org/packages/bc/1f/fb7375467e9adaa371cd617c2984fefe44bdce73add4c70b8dd8cab1b33a/hiredis-3.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8e8a4b8540581dcd1b2b25827a54cfd538e0afeaa1a0e3ca87ad7126965981cc", size = 176127, upload-time = "2025-10-14T16:33:02.793Z" }, + { url = "https://files.pythonhosted.org/packages/66/14/0dc2b99209c400f3b8f24067273e9c3cb383d894e155830879108fb19e98/hiredis-3.3.0-cp314-cp314t-win32.whl", hash = "sha256:298593bb08487753b3afe6dc38bac2532e9bac8dcee8d992ef9977d539cc6776", size = 22024, upload-time = "2025-10-14T16:33:03.812Z" }, + { url = "https://files.pythonhosted.org/packages/b2/2f/8a0befeed8bbe142d5a6cf3b51e8cbe019c32a64a596b0ebcbc007a8f8f1/hiredis-3.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b442b6ab038a6f3b5109874d2514c4edf389d8d8b553f10f12654548808683bc", size = 23808, upload-time = "2025-10-14T16:33:04.965Z" }, +] + +[[package]] +name = "horde-model-reference" +version = "2.1.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiofiles" }, + { name = "haidra-core" }, + { name = "httpx" }, + { name = "loguru" }, + { name = "mkdocs" }, + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "redis", extra = ["hiredis"] }, + { name = "requests" }, + { name = "strenum" }, + { name = "typing-extensions" }, + { name = "ujson" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4e/37/0f480c0faa58257903a970804be9a712dfa269fed98f2ae023121b589552/horde_model_reference-2.1.5.tar.gz", hash = "sha256:61556b62bbb7678939960fb6b06af3692cf6643d788acf0ee213b1e560223d35", size = 555021, upload-time = "2025-12-22T15:52:02.422Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/cb/4e0e5203b327360ff136fc7fd0a385236a90616719fc156a7ae267069cb1/horde_model_reference-2.1.5-py3-none-any.whl", hash = "sha256:07340bd88665497eb655b387b21d2fa33f2d12dcd85253db8334b8d6d5876f1d", size = 202222, upload-time = "2025-12-22T15:52:01.258Z" }, +] + +[[package]] +name = "horde-sdk" +source = { editable = "." 
} +dependencies = [ + { name = "aiodns" }, + { name = "aiofiles" }, + { name = "aiohttp" }, + { name = "certifi" }, + { name = "horde-model-reference" }, + { name = "logfire" }, + { name = "loguru" }, + { name = "networkx" }, + { name = "pillow" }, + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "requests" }, + { name = "strenum" }, +] + +[package.dev-dependencies] +dev = [ + { name = "babel" }, + { name = "black" }, + { name = "build" }, + { name = "coverage" }, + { name = "datamodel-code-generator" }, + { name = "gevent" }, + { name = "mypy" }, + { name = "pre-commit" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "pytest-cov" }, + { name = "pytest-xdist" }, + { name = "ruff" }, + { name = "tach" }, + { name = "tox" }, + { name = "types-aiofiles" }, + { name = "types-networkx" }, + { name = "types-pillow" }, + { name = "types-pytz" }, + { name = "types-pyyaml" }, + { name = "types-requests" }, + { name = "types-setuptools" }, + { name = "types-urllib3" }, +] +docs = [ + { name = "mkdocs-awesome-pages-plugin" }, + { name = "mkdocs-material" }, + { name = "mkdocs-material-extensions" }, + { name = "mkdocs-mermaid2-plugin" }, + { name = "mkdocstrings" }, + { name = "mkdocstrings-python" }, +] +full-telemetry = [ + { name = "opentelemetry-instrumentation-aiohttp-client" }, + { name = "opentelemetry-instrumentation-requests" }, +] + +[package.metadata] +requires-dist = [ + { name = "aiodns", specifier = ">=3.2.0" }, + { name = "aiofiles", specifier = ">=24.1.0" }, + { name = "aiohttp", specifier = ">=3.11.13" }, + { name = "certifi", specifier = ">=2025.1.31" }, + { name = "horde-model-reference", specifier = ">=0.10.0" }, + { name = "logfire", specifier = ">=3.7.1" }, + { name = "loguru", specifier = ">=0.7.3" }, + { name = "networkx", specifier = ">=3.4.2" }, + { name = "pillow", specifier = ">=11.1.0" }, + { name = "pydantic", specifier = ">=2.10.6" }, + { name = "python-dotenv", specifier = ">=1.0.1" }, + { name = "requests", specifier = ">=2.32.3" }, + { name = "strenum", specifier = ">=0.4.15" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "babel", specifier = ">=2.17.0" }, + { name = "black", specifier = ">=24.10.0" }, + { name = "build", specifier = ">=0.10.0" }, + { name = "coverage", specifier = ">=7.2.7" }, + { name = "datamodel-code-generator", specifier = ">=0.28.4" }, + { name = "gevent", specifier = ">=24.11.1" }, + { name = "mypy", specifier = ">=1.13.0" }, + { name = "pre-commit", specifier = "~=4.0.1" }, + { name = "pytest", specifier = ">=8.3.3" }, + { name = "pytest-asyncio", specifier = ">=0.25.3" }, + { name = "pytest-cov", specifier = ">=6.0.0" }, + { name = "pytest-xdist", specifier = ">=3.8.0" }, + { name = "ruff", specifier = ">=0.7.2" }, + { name = "tach", specifier = ">=0.29.0" }, + { name = "tox", specifier = "~=4.23.2" }, + { name = "types-aiofiles", specifier = ">=24.1.0.20241221" }, + { name = "types-networkx", specifier = ">=3.5.0.20251104" }, + { name = "types-pillow", specifier = ">=10.2.0.20240822" }, + { name = "types-pytz", specifier = ">=2025.1.0.20250204" }, + { name = "types-pyyaml", specifier = ">=6.0.12.20250402" }, + { name = "types-requests", specifier = ">=2.32.0.20250306" }, + { name = "types-setuptools", specifier = ">=75.8.2.20250305" }, + { name = "types-urllib3", specifier = ">=1.26.25.14" }, +] +docs = [ + { name = "mkdocs-awesome-pages-plugin", specifier = ">=2.10.1" }, + { name = "mkdocs-material", specifier = ">=9.6.7" }, + { name = "mkdocs-material-extensions", specifier = ">=1.3.1" }, 
+ { name = "mkdocs-mermaid2-plugin", specifier = ">=1.2.1" }, + { name = "mkdocstrings", specifier = ">=0.29.0" }, + { name = "mkdocstrings-python", specifier = ">=1.16.5" }, +] +full-telemetry = [ + { name = "opentelemetry-instrumentation-aiohttp-client", specifier = ">=0.51b0" }, + { name = "opentelemetry-instrumentation-requests", specifier = ">=0.51b0" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "identify" +version = "2.6.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ff/e7/685de97986c916a6d93b3876139e00eef26ad5bbbd61925d670ae8013449/identify-2.6.15.tar.gz", hash = "sha256:e4f4864b96c6557ef2a1e1c951771838f4edc9df3a72ec7118b338801b11c7bf", size = 99311, upload-time = "2025-10-02T17:43:40.631Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/1c/e5fd8f973d4f375adb21565739498e2e9a1e54c858a97b9a8ccfdc81da9b/identify-2.6.15-py2.py3-none-any.whl", hash = "sha256:1181ef7608e00704db228516541eb83a88a9f94433a8c80bb9b5bd54b1d81757", size = 99183, upload-time = "2025-10-02T17:43:39.137Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107, upload-time = "2025-12-21T10:00:19.278Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865, upload-time = "2025-12-21T10:00:18.329Z" }, +] + +[[package]] +name = "inflect" +version = "7.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "more-itertools" }, + { name = "typeguard" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/78/c6/943357d44a21fd995723d07ccaddd78023eace03c1846049a2645d4324a3/inflect-7.5.0.tar.gz", hash = "sha256:faf19801c3742ed5a05a8ce388e0d8fe1a07f8d095c82201eb904f5d27ad571f", size = 73751, upload-time = "2024-12-28T17:11:18.897Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/eb/427ed2b20a38a4ee29f24dbe4ae2dafab198674fe9a85e3d6adf9e5f5f41/inflect-7.5.0-py3-none-any.whl", hash = "sha256:2aea70e5e70c35d8350b8097396ec155ffd68def678c7ff97f51aa69c1d92344", size = 35197, upload-time = "2024-12-28T17:11:15.931Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, +] + +[[package]] +name = "isort" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/63/53/4f3c058e3bace40282876f9b553343376ee687f3c35a525dc79dbd450f88/isort-7.0.0.tar.gz", hash = "sha256:5513527951aadb3ac4292a41a16cbc50dd1642432f5e8c20057d414bdafb4187", size = 805049, upload-time = "2025-10-11T13:30:59.107Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/ed/e3705d6d02b4f7aea715a353c8ce193efd0b5db13e204df895d38734c244/isort-7.0.0-py3-none-any.whl", hash = "sha256:1bcabac8bc3c36c7fb7b98a76c8abb18e0f841a3ba81decac7691008592499c1", size = 94672, upload-time = "2025-10-11T13:30:57.665Z" }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + +[[package]] +name = "jsbeautifier" +version 
= "1.15.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "editorconfig" }, + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ea/98/d6cadf4d5a1c03b2136837a435682418c29fdeb66be137128544cecc5b7a/jsbeautifier-1.15.4.tar.gz", hash = "sha256:5bb18d9efb9331d825735fbc5360ee8f1aac5e52780042803943aa7f854f7592", size = 75257, upload-time = "2025-02-27T17:53:53.252Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2d/14/1c65fccf8413d5f5c6e8425f84675169654395098000d8bddc4e9d3390e1/jsbeautifier-1.15.4-py3-none-any.whl", hash = "sha256:72f65de312a3f10900d7685557f84cb61a9733c50dcc27271a39f5b0051bf528", size = 94707, upload-time = "2025-02-27T17:53:46.152Z" }, +] + +[[package]] +name = "librt" +version = "0.7.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/93/e4/b59bdf1197fdf9888452ea4d2048cdad61aef85eb83e99dc52551d7fdc04/librt-0.7.4.tar.gz", hash = "sha256:3871af56c59864d5fd21d1ac001eb2fb3b140d52ba0454720f2e4a19812404ba", size = 145862, upload-time = "2025-12-15T16:52:43.862Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/e7/b805d868d21f425b7e76a0ea71a2700290f2266a4f3c8357fcf73efc36aa/librt-0.7.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7dd3b5c37e0fb6666c27cf4e2c88ae43da904f2155c4cfc1e5a2fdce3b9fcf92", size = 55688, upload-time = "2025-12-15T16:51:31.571Z" }, + { url = "https://files.pythonhosted.org/packages/59/5e/69a2b02e62a14cfd5bfd9f1e9adea294d5bcfeea219c7555730e5d068ee4/librt-0.7.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a9c5de1928c486201b23ed0cc4ac92e6e07be5cd7f3abc57c88a9cf4f0f32108", size = 57141, upload-time = "2025-12-15T16:51:32.714Z" }, + { url = "https://files.pythonhosted.org/packages/6e/6b/05dba608aae1272b8ea5ff8ef12c47a4a099a04d1e00e28a94687261d403/librt-0.7.4-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:078ae52ffb3f036396cc4aed558e5b61faedd504a3c1f62b8ae34bf95ae39d94", size = 165322, upload-time = "2025-12-15T16:51:33.986Z" }, + { url = "https://files.pythonhosted.org/packages/8f/bc/199533d3fc04a4cda8d7776ee0d79955ab0c64c79ca079366fbc2617e680/librt-0.7.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce58420e25097b2fc201aef9b9f6d65df1eb8438e51154e1a7feb8847e4a55ab", size = 174216, upload-time = "2025-12-15T16:51:35.384Z" }, + { url = "https://files.pythonhosted.org/packages/62/ec/09239b912a45a8ed117cb4a6616d9ff508f5d3131bd84329bf2f8d6564f1/librt-0.7.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b719c8730c02a606dc0e8413287e8e94ac2d32a51153b300baf1f62347858fba", size = 189005, upload-time = "2025-12-15T16:51:36.687Z" }, + { url = "https://files.pythonhosted.org/packages/46/2e/e188313d54c02f5b0580dd31476bb4b0177514ff8d2be9f58d4a6dc3a7ba/librt-0.7.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3749ef74c170809e6dee68addec9d2458700a8de703de081c888e92a8b015cf9", size = 183960, upload-time = "2025-12-15T16:51:37.977Z" }, + { url = "https://files.pythonhosted.org/packages/eb/84/f1d568d254518463d879161d3737b784137d236075215e56c7c9be191cee/librt-0.7.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b35c63f557653c05b5b1b6559a074dbabe0afee28ee2a05b6c9ba21ad0d16a74", size = 177609, upload-time = "2025-12-15T16:51:40.584Z" }, + { url = 
"https://files.pythonhosted.org/packages/5d/43/060bbc1c002f0d757c33a1afe6bf6a565f947a04841139508fc7cef6c08b/librt-0.7.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1ef704e01cb6ad39ad7af668d51677557ca7e5d377663286f0ee1b6b27c28e5f", size = 199269, upload-time = "2025-12-15T16:51:41.879Z" }, + { url = "https://files.pythonhosted.org/packages/ff/7f/708f8f02d8012ee9f366c07ea6a92882f48bd06cc1ff16a35e13d0fbfb08/librt-0.7.4-cp312-cp312-win32.whl", hash = "sha256:c66c2b245926ec15188aead25d395091cb5c9df008d3b3207268cd65557d6286", size = 43186, upload-time = "2025-12-15T16:51:43.149Z" }, + { url = "https://files.pythonhosted.org/packages/f1/a5/4e051b061c8b2509be31b2c7ad4682090502c0a8b6406edcf8c6b4fe1ef7/librt-0.7.4-cp312-cp312-win_amd64.whl", hash = "sha256:71a56f4671f7ff723451f26a6131754d7c1809e04e22ebfbac1db8c9e6767a20", size = 49455, upload-time = "2025-12-15T16:51:44.336Z" }, + { url = "https://files.pythonhosted.org/packages/d0/d2/90d84e9f919224a3c1f393af1636d8638f54925fdc6cd5ee47f1548461e5/librt-0.7.4-cp312-cp312-win_arm64.whl", hash = "sha256:419eea245e7ec0fe664eb7e85e7ff97dcdb2513ca4f6b45a8ec4a3346904f95a", size = 42828, upload-time = "2025-12-15T16:51:45.498Z" }, + { url = "https://files.pythonhosted.org/packages/fe/4d/46a53ccfbb39fd0b493fd4496eb76f3ebc15bb3e45d8c2e695a27587edf5/librt-0.7.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d44a1b1ba44cbd2fc3cb77992bef6d6fdb1028849824e1dd5e4d746e1f7f7f0b", size = 55745, upload-time = "2025-12-15T16:51:46.636Z" }, + { url = "https://files.pythonhosted.org/packages/7f/2b/3ac7f5212b1828bf4f979cf87f547db948d3e28421d7a430d4db23346ce4/librt-0.7.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c9cab4b3de1f55e6c30a84c8cee20e4d3b2476f4d547256694a1b0163da4fe32", size = 57166, upload-time = "2025-12-15T16:51:48.219Z" }, + { url = "https://files.pythonhosted.org/packages/e8/99/6523509097cbe25f363795f0c0d1c6a3746e30c2994e25b5aefdab119b21/librt-0.7.4-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2857c875f1edd1feef3c371fbf830a61b632fb4d1e57160bb1e6a3206e6abe67", size = 165833, upload-time = "2025-12-15T16:51:49.443Z" }, + { url = "https://files.pythonhosted.org/packages/fe/35/323611e59f8fe032649b4fb7e77f746f96eb7588fcbb31af26bae9630571/librt-0.7.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b370a77be0a16e1ad0270822c12c21462dc40496e891d3b0caf1617c8cc57e20", size = 174818, upload-time = "2025-12-15T16:51:51.015Z" }, + { url = "https://files.pythonhosted.org/packages/41/e6/40fb2bb21616c6e06b6a64022802228066e9a31618f493e03f6b9661548a/librt-0.7.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d05acd46b9a52087bfc50c59dfdf96a2c480a601e8898a44821c7fd676598f74", size = 189607, upload-time = "2025-12-15T16:51:52.671Z" }, + { url = "https://files.pythonhosted.org/packages/32/48/1b47c7d5d28b775941e739ed2bfe564b091c49201b9503514d69e4ed96d7/librt-0.7.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:70969229cb23d9c1a80e14225838d56e464dc71fa34c8342c954fc50e7516dee", size = 184585, upload-time = "2025-12-15T16:51:54.027Z" }, + { url = "https://files.pythonhosted.org/packages/75/a6/ee135dfb5d3b54d5d9001dbe483806229c6beac3ee2ba1092582b7efeb1b/librt-0.7.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4450c354b89dbb266730893862dbff06006c9ed5b06b6016d529b2bf644fc681", size = 178249, upload-time = "2025-12-15T16:51:55.248Z" }, + { url = 
"https://files.pythonhosted.org/packages/04/87/d5b84ec997338be26af982bcd6679be0c1db9a32faadab1cf4bb24f9e992/librt-0.7.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:adefe0d48ad35b90b6f361f6ff5a1bd95af80c17d18619c093c60a20e7a5b60c", size = 199851, upload-time = "2025-12-15T16:51:56.933Z" }, + { url = "https://files.pythonhosted.org/packages/86/63/ba1333bf48306fe398e3392a7427ce527f81b0b79d0d91618c4610ce9d15/librt-0.7.4-cp313-cp313-win32.whl", hash = "sha256:21ea710e96c1e050635700695095962a22ea420d4b3755a25e4909f2172b4ff2", size = 43249, upload-time = "2025-12-15T16:51:58.498Z" }, + { url = "https://files.pythonhosted.org/packages/f9/8a/de2c6df06cdfa9308c080e6b060fe192790b6a48a47320b215e860f0e98c/librt-0.7.4-cp313-cp313-win_amd64.whl", hash = "sha256:772e18696cf5a64afee908662fbcb1f907460ddc851336ee3a848ef7684c8e1e", size = 49417, upload-time = "2025-12-15T16:51:59.618Z" }, + { url = "https://files.pythonhosted.org/packages/31/66/8ee0949efc389691381ed686185e43536c20e7ad880c122dd1f31e65c658/librt-0.7.4-cp313-cp313-win_arm64.whl", hash = "sha256:52e34c6af84e12921748c8354aa6acf1912ca98ba60cdaa6920e34793f1a0788", size = 42824, upload-time = "2025-12-15T16:52:00.784Z" }, + { url = "https://files.pythonhosted.org/packages/74/81/6921e65c8708eb6636bbf383aa77e6c7dad33a598ed3b50c313306a2da9d/librt-0.7.4-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:4f1ee004942eaaed6e06c087d93ebc1c67e9a293e5f6b9b5da558df6bf23dc5d", size = 55191, upload-time = "2025-12-15T16:52:01.97Z" }, + { url = "https://files.pythonhosted.org/packages/0d/d6/3eb864af8a8de8b39cc8dd2e9ded1823979a27795d72c4eea0afa8c26c9f/librt-0.7.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:d854c6dc0f689bad7ed452d2a3ecff58029d80612d336a45b62c35e917f42d23", size = 56898, upload-time = "2025-12-15T16:52:03.356Z" }, + { url = "https://files.pythonhosted.org/packages/49/bc/b1d4c0711fdf79646225d576faee8747b8528a6ec1ceb6accfd89ade7102/librt-0.7.4-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a4f7339d9e445280f23d63dea842c0c77379c4a47471c538fc8feedab9d8d063", size = 163725, upload-time = "2025-12-15T16:52:04.572Z" }, + { url = "https://files.pythonhosted.org/packages/2c/08/61c41cd8f0a6a41fc99ea78a2205b88187e45ba9800792410ed62f033584/librt-0.7.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39003fc73f925e684f8521b2dbf34f61a5deb8a20a15dcf53e0d823190ce8848", size = 172469, upload-time = "2025-12-15T16:52:05.863Z" }, + { url = "https://files.pythonhosted.org/packages/8b/c7/4ee18b4d57f01444230bc18cf59103aeab8f8c0f45e84e0e540094df1df1/librt-0.7.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6bb15ee29d95875ad697d449fe6071b67f730f15a6961913a2b0205015ca0843", size = 186804, upload-time = "2025-12-15T16:52:07.192Z" }, + { url = "https://files.pythonhosted.org/packages/a1/af/009e8ba3fbf830c936842da048eda1b34b99329f402e49d88fafff6525d1/librt-0.7.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:02a69369862099e37d00765583052a99d6a68af7e19b887e1b78fee0146b755a", size = 181807, upload-time = "2025-12-15T16:52:08.554Z" }, + { url = "https://files.pythonhosted.org/packages/85/26/51ae25f813656a8b117c27a974f25e8c1e90abcd5a791ac685bf5b489a1b/librt-0.7.4-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ec72342cc4d62f38b25a94e28b9efefce41839aecdecf5e9627473ed04b7be16", size = 175595, upload-time = "2025-12-15T16:52:10.186Z" }, + { url = 
"https://files.pythonhosted.org/packages/48/93/36d6c71f830305f88996b15c8e017aa8d1e03e2e947b40b55bbf1a34cf24/librt-0.7.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:776dbb9bfa0fc5ce64234b446995d8d9f04badf64f544ca036bd6cff6f0732ce", size = 196504, upload-time = "2025-12-15T16:52:11.472Z" }, + { url = "https://files.pythonhosted.org/packages/08/11/8299e70862bb9d704735bf132c6be09c17b00fbc7cda0429a9df222fdc1b/librt-0.7.4-cp314-cp314-win32.whl", hash = "sha256:0f8cac84196d0ffcadf8469d9ded4d4e3a8b1c666095c2a291e22bf58e1e8a9f", size = 39738, upload-time = "2025-12-15T16:52:12.962Z" }, + { url = "https://files.pythonhosted.org/packages/54/d5/656b0126e4e0f8e2725cd2d2a1ec40f71f37f6f03f135a26b663c0e1a737/librt-0.7.4-cp314-cp314-win_amd64.whl", hash = "sha256:037f5cb6fe5abe23f1dc058054d50e9699fcc90d0677eee4e4f74a8677636a1a", size = 45976, upload-time = "2025-12-15T16:52:14.441Z" }, + { url = "https://files.pythonhosted.org/packages/60/86/465ff07b75c1067da8fa7f02913c4ead096ef106cfac97a977f763783bfb/librt-0.7.4-cp314-cp314-win_arm64.whl", hash = "sha256:a5deebb53d7a4d7e2e758a96befcd8edaaca0633ae71857995a0f16033289e44", size = 39073, upload-time = "2025-12-15T16:52:15.621Z" }, + { url = "https://files.pythonhosted.org/packages/b3/a0/24941f85960774a80d4b3c2aec651d7d980466da8101cae89e8b032a3e21/librt-0.7.4-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:b4c25312c7f4e6ab35ab16211bdf819e6e4eddcba3b2ea632fb51c9a2a97e105", size = 57369, upload-time = "2025-12-15T16:52:16.782Z" }, + { url = "https://files.pythonhosted.org/packages/77/a0/ddb259cae86ab415786c1547d0fe1b40f04a7b089f564fd5c0242a3fafb2/librt-0.7.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:618b7459bb392bdf373f2327e477597fff8f9e6a1878fffc1b711c013d1b0da4", size = 59230, upload-time = "2025-12-15T16:52:18.259Z" }, + { url = "https://files.pythonhosted.org/packages/31/11/77823cb530ab8a0c6fac848ac65b745be446f6f301753b8990e8809080c9/librt-0.7.4-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1437c3f72a30c7047f16fd3e972ea58b90172c3c6ca309645c1c68984f05526a", size = 183869, upload-time = "2025-12-15T16:52:19.457Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ce/157db3614cf3034b3f702ae5ba4fefda4686f11eea4b7b96542324a7a0e7/librt-0.7.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c96cb76f055b33308f6858b9b594618f1b46e147a4d03a4d7f0c449e304b9b95", size = 194606, upload-time = "2025-12-15T16:52:20.795Z" }, + { url = "https://files.pythonhosted.org/packages/30/ef/6ec4c7e3d6490f69a4fd2803516fa5334a848a4173eac26d8ee6507bff6e/librt-0.7.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:28f990e6821204f516d09dc39966ef8b84556ffd648d5926c9a3f681e8de8906", size = 206776, upload-time = "2025-12-15T16:52:22.229Z" }, + { url = "https://files.pythonhosted.org/packages/ad/22/750b37bf549f60a4782ab80e9d1e9c44981374ab79a7ea68670159905918/librt-0.7.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bc4aebecc79781a1b77d7d4e7d9fe080385a439e198d993b557b60f9117addaf", size = 203205, upload-time = "2025-12-15T16:52:23.603Z" }, + { url = "https://files.pythonhosted.org/packages/7a/87/2e8a0f584412a93df5faad46c5fa0a6825fdb5eba2ce482074b114877f44/librt-0.7.4-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:022cc673e69283a42621dd453e2407cf1647e77f8bd857d7ad7499901e62376f", size = 196696, upload-time = "2025-12-15T16:52:24.951Z" }, + { url = 
"https://files.pythonhosted.org/packages/e5/ca/7bf78fa950e43b564b7de52ceeb477fb211a11f5733227efa1591d05a307/librt-0.7.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:2b3ca211ae8ea540569e9c513da052699b7b06928dcda61247cb4f318122bdb5", size = 217191, upload-time = "2025-12-15T16:52:26.194Z" }, + { url = "https://files.pythonhosted.org/packages/d6/49/3732b0e8424ae35ad5c3166d9dd5bcdae43ce98775e0867a716ff5868064/librt-0.7.4-cp314-cp314t-win32.whl", hash = "sha256:8a461f6456981d8c8e971ff5a55f2e34f4e60871e665d2f5fde23ee74dea4eeb", size = 40276, upload-time = "2025-12-15T16:52:27.54Z" }, + { url = "https://files.pythonhosted.org/packages/35/d6/d8823e01bd069934525fddb343189c008b39828a429b473fb20d67d5cd36/librt-0.7.4-cp314-cp314t-win_amd64.whl", hash = "sha256:721a7b125a817d60bf4924e1eec2a7867bfcf64cfc333045de1df7a0629e4481", size = 46772, upload-time = "2025-12-15T16:52:28.653Z" }, + { url = "https://files.pythonhosted.org/packages/36/e9/a0aa60f5322814dd084a89614e9e31139702e342f8459ad8af1984a18168/librt-0.7.4-cp314-cp314t-win_arm64.whl", hash = "sha256:76b2ba71265c0102d11458879b4d53ccd0b32b0164d14deb8d2b598a018e502f", size = 39724, upload-time = "2025-12-15T16:52:29.836Z" }, +] + +[[package]] +name = "logfire" +version = "4.16.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "executing" }, + { name = "opentelemetry-exporter-otlp-proto-http" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-sdk" }, + { name = "protobuf" }, + { name = "rich" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e2/60/b8040db3598a55da64c45e3e689f2baa87389a4648a6f46ba80be3329f23/logfire-4.16.0.tar.gz", hash = "sha256:03a3ab8fdc13399309cb55d69cba7a6fcbad3526cfad85fc4f72e7d75e22b654", size = 550759, upload-time = "2025-12-04T16:16:39.477Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/53/f7/ffcf81eb4aea75e40c0646b9519947d2070626c5d533922df92975045181/logfire-4.16.0-py3-none-any.whl", hash = "sha256:8f895f6c2efa593ad6d49e1b06d8e6e351d3dd0cad61ce5def0c3d401f8ea707", size = 229122, upload-time = "2025-12-04T16:16:35.963Z" }, +] + +[[package]] +name = "loguru" +version = "0.7.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "win32-setctime", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3a/05/a1dae3dffd1116099471c643b8924f5aa6524411dc6c63fdae648c4f1aca/loguru-0.7.3.tar.gz", hash = "sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6", size = 63559, upload-time = "2024-12-06T11:20:56.608Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/29/0348de65b8cc732daa3e33e67806420b2ae89bdce2b04af740289c5c6c8c/loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c", size = 61595, upload-time = "2024-12-06T11:20:54.538Z" }, +] + +[[package]] +name = "markdown" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/ab/7dd27d9d863b3376fcf23a5a13cb5d024aed1db46f963f1b5735ae43b3be/markdown-3.10.tar.gz", hash = "sha256:37062d4f2aa4b2b6b32aefb80faa300f82cc790cb949a35b8caede34f2b68c0e", size = 364931, upload-time = "2025-11-03T19:51:15.007Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/81/54e3ce63502cd085a0c556652a4e1b919c45a446bd1e5300e10c44c8c521/markdown-3.10-py3-none-any.whl", 
hash = "sha256:b5b99d6951e2e4948d939255596523444c0e677c669700b1d17aa4a8a464cb7c", size = 107678, upload-time = "2025-11-03T19:51:13.887Z" }, +] + +[[package]] +name = "markdown-it-py" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" }, +] + +[[package]] +name = "markupsafe" +version = "3.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/99/7690b6d4034fffd95959cbe0c02de8deb3098cc577c67bb6a24fe5d7caa7/markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698", size = 80313, upload-time = "2025-09-27T18:37:40.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/72/147da192e38635ada20e0a2e1a51cf8823d2119ce8883f7053879c2199b5/markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e", size = 11615, upload-time = "2025-09-27T18:36:30.854Z" }, + { url = "https://files.pythonhosted.org/packages/9a/81/7e4e08678a1f98521201c3079f77db69fb552acd56067661f8c2f534a718/markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce", size = 12020, upload-time = "2025-09-27T18:36:31.971Z" }, + { url = "https://files.pythonhosted.org/packages/1e/2c/799f4742efc39633a1b54a92eec4082e4f815314869865d876824c257c1e/markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d", size = 24332, upload-time = "2025-09-27T18:36:32.813Z" }, + { url = "https://files.pythonhosted.org/packages/3c/2e/8d0c2ab90a8c1d9a24f0399058ab8519a3279d1bd4289511d74e909f060e/markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d", size = 22947, upload-time = "2025-09-27T18:36:33.86Z" }, + { url = "https://files.pythonhosted.org/packages/2c/54/887f3092a85238093a0b2154bd629c89444f395618842e8b0c41783898ea/markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a", size = 21962, upload-time = "2025-09-27T18:36:35.099Z" }, + { url = "https://files.pythonhosted.org/packages/c9/2f/336b8c7b6f4a4d95e91119dc8521402461b74a485558d8f238a68312f11c/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b", size = 23760, upload-time = "2025-09-27T18:36:36.001Z" }, + { url = "https://files.pythonhosted.org/packages/32/43/67935f2b7e4982ffb50a4d169b724d74b62a3964bc1a9a527f5ac4f1ee2b/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = 
"sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f", size = 21529, upload-time = "2025-09-27T18:36:36.906Z" }, + { url = "https://files.pythonhosted.org/packages/89/e0/4486f11e51bbba8b0c041098859e869e304d1c261e59244baa3d295d47b7/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b", size = 23015, upload-time = "2025-09-27T18:36:37.868Z" }, + { url = "https://files.pythonhosted.org/packages/2f/e1/78ee7a023dac597a5825441ebd17170785a9dab23de95d2c7508ade94e0e/markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d", size = 14540, upload-time = "2025-09-27T18:36:38.761Z" }, + { url = "https://files.pythonhosted.org/packages/aa/5b/bec5aa9bbbb2c946ca2733ef9c4ca91c91b6a24580193e891b5f7dbe8e1e/markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c", size = 15105, upload-time = "2025-09-27T18:36:39.701Z" }, + { url = "https://files.pythonhosted.org/packages/e5/f1/216fc1bbfd74011693a4fd837e7026152e89c4bcf3e77b6692fba9923123/markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f", size = 13906, upload-time = "2025-09-27T18:36:40.689Z" }, + { url = "https://files.pythonhosted.org/packages/38/2f/907b9c7bbba283e68f20259574b13d005c121a0fa4c175f9bed27c4597ff/markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795", size = 11622, upload-time = "2025-09-27T18:36:41.777Z" }, + { url = "https://files.pythonhosted.org/packages/9c/d9/5f7756922cdd676869eca1c4e3c0cd0df60ed30199ffd775e319089cb3ed/markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219", size = 12029, upload-time = "2025-09-27T18:36:43.257Z" }, + { url = "https://files.pythonhosted.org/packages/00/07/575a68c754943058c78f30db02ee03a64b3c638586fba6a6dd56830b30a3/markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6", size = 24374, upload-time = "2025-09-27T18:36:44.508Z" }, + { url = "https://files.pythonhosted.org/packages/a9/21/9b05698b46f218fc0e118e1f8168395c65c8a2c750ae2bab54fc4bd4e0e8/markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676", size = 22980, upload-time = "2025-09-27T18:36:45.385Z" }, + { url = "https://files.pythonhosted.org/packages/7f/71/544260864f893f18b6827315b988c146b559391e6e7e8f7252839b1b846a/markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9", size = 21990, upload-time = "2025-09-27T18:36:46.916Z" }, + { url = "https://files.pythonhosted.org/packages/c2/28/b50fc2f74d1ad761af2f5dcce7492648b983d00a65b8c0e0cb457c82ebbe/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1", size = 23784, upload-time = "2025-09-27T18:36:47.884Z" }, + { url = "https://files.pythonhosted.org/packages/ed/76/104b2aa106a208da8b17a2fb72e033a5a9d7073c68f7e508b94916ed47a9/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = 
"sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc", size = 21588, upload-time = "2025-09-27T18:36:48.82Z" }, + { url = "https://files.pythonhosted.org/packages/b5/99/16a5eb2d140087ebd97180d95249b00a03aa87e29cc224056274f2e45fd6/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12", size = 23041, upload-time = "2025-09-27T18:36:49.797Z" }, + { url = "https://files.pythonhosted.org/packages/19/bc/e7140ed90c5d61d77cea142eed9f9c303f4c4806f60a1044c13e3f1471d0/markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed", size = 14543, upload-time = "2025-09-27T18:36:51.584Z" }, + { url = "https://files.pythonhosted.org/packages/05/73/c4abe620b841b6b791f2edc248f556900667a5a1cf023a6646967ae98335/markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5", size = 15113, upload-time = "2025-09-27T18:36:52.537Z" }, + { url = "https://files.pythonhosted.org/packages/f0/3a/fa34a0f7cfef23cf9500d68cb7c32dd64ffd58a12b09225fb03dd37d5b80/markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485", size = 13911, upload-time = "2025-09-27T18:36:53.513Z" }, + { url = "https://files.pythonhosted.org/packages/e4/d7/e05cd7efe43a88a17a37b3ae96e79a19e846f3f456fe79c57ca61356ef01/markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73", size = 11658, upload-time = "2025-09-27T18:36:54.819Z" }, + { url = "https://files.pythonhosted.org/packages/99/9e/e412117548182ce2148bdeacdda3bb494260c0b0184360fe0d56389b523b/markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37", size = 12066, upload-time = "2025-09-27T18:36:55.714Z" }, + { url = "https://files.pythonhosted.org/packages/bc/e6/fa0ffcda717ef64a5108eaa7b4f5ed28d56122c9a6d70ab8b72f9f715c80/markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19", size = 25639, upload-time = "2025-09-27T18:36:56.908Z" }, + { url = "https://files.pythonhosted.org/packages/96/ec/2102e881fe9d25fc16cb4b25d5f5cde50970967ffa5dddafdb771237062d/markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025", size = 23569, upload-time = "2025-09-27T18:36:57.913Z" }, + { url = "https://files.pythonhosted.org/packages/4b/30/6f2fce1f1f205fc9323255b216ca8a235b15860c34b6798f810f05828e32/markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6", size = 23284, upload-time = "2025-09-27T18:36:58.833Z" }, + { url = "https://files.pythonhosted.org/packages/58/47/4a0ccea4ab9f5dcb6f79c0236d954acb382202721e704223a8aafa38b5c8/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f", size = 24801, upload-time = "2025-09-27T18:36:59.739Z" }, + { url = "https://files.pythonhosted.org/packages/6a/70/3780e9b72180b6fecb83a4814d84c3bf4b4ae4bf0b19c27196104149734c/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = 
"sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb", size = 22769, upload-time = "2025-09-27T18:37:00.719Z" }, + { url = "https://files.pythonhosted.org/packages/98/c5/c03c7f4125180fc215220c035beac6b9cb684bc7a067c84fc69414d315f5/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009", size = 23642, upload-time = "2025-09-27T18:37:01.673Z" }, + { url = "https://files.pythonhosted.org/packages/80/d6/2d1b89f6ca4bff1036499b1e29a1d02d282259f3681540e16563f27ebc23/markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354", size = 14612, upload-time = "2025-09-27T18:37:02.639Z" }, + { url = "https://files.pythonhosted.org/packages/2b/98/e48a4bfba0a0ffcf9925fe2d69240bfaa19c6f7507b8cd09c70684a53c1e/markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218", size = 15200, upload-time = "2025-09-27T18:37:03.582Z" }, + { url = "https://files.pythonhosted.org/packages/0e/72/e3cc540f351f316e9ed0f092757459afbc595824ca724cbc5a5d4263713f/markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287", size = 13973, upload-time = "2025-09-27T18:37:04.929Z" }, + { url = "https://files.pythonhosted.org/packages/33/8a/8e42d4838cd89b7dde187011e97fe6c3af66d8c044997d2183fbd6d31352/markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe", size = 11619, upload-time = "2025-09-27T18:37:06.342Z" }, + { url = "https://files.pythonhosted.org/packages/b5/64/7660f8a4a8e53c924d0fa05dc3a55c9cee10bbd82b11c5afb27d44b096ce/markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026", size = 12029, upload-time = "2025-09-27T18:37:07.213Z" }, + { url = "https://files.pythonhosted.org/packages/da/ef/e648bfd021127bef5fa12e1720ffed0c6cbb8310c8d9bea7266337ff06de/markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737", size = 24408, upload-time = "2025-09-27T18:37:09.572Z" }, + { url = "https://files.pythonhosted.org/packages/41/3c/a36c2450754618e62008bf7435ccb0f88053e07592e6028a34776213d877/markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97", size = 23005, upload-time = "2025-09-27T18:37:10.58Z" }, + { url = "https://files.pythonhosted.org/packages/bc/20/b7fdf89a8456b099837cd1dc21974632a02a999ec9bf7ca3e490aacd98e7/markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d", size = 22048, upload-time = "2025-09-27T18:37:11.547Z" }, + { url = "https://files.pythonhosted.org/packages/9a/a7/591f592afdc734f47db08a75793a55d7fbcc6902a723ae4cfbab61010cc5/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda", size = 23821, upload-time = "2025-09-27T18:37:12.48Z" }, + { url = "https://files.pythonhosted.org/packages/7d/33/45b24e4f44195b26521bc6f1a82197118f74df348556594bd2262bda1038/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = 
"sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf", size = 21606, upload-time = "2025-09-27T18:37:13.485Z" }, + { url = "https://files.pythonhosted.org/packages/ff/0e/53dfaca23a69fbfbbf17a4b64072090e70717344c52eaaaa9c5ddff1e5f0/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe", size = 23043, upload-time = "2025-09-27T18:37:14.408Z" }, + { url = "https://files.pythonhosted.org/packages/46/11/f333a06fc16236d5238bfe74daccbca41459dcd8d1fa952e8fbd5dccfb70/markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9", size = 14747, upload-time = "2025-09-27T18:37:15.36Z" }, + { url = "https://files.pythonhosted.org/packages/28/52/182836104b33b444e400b14f797212f720cbc9ed6ba34c800639d154e821/markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581", size = 15341, upload-time = "2025-09-27T18:37:16.496Z" }, + { url = "https://files.pythonhosted.org/packages/6f/18/acf23e91bd94fd7b3031558b1f013adfa21a8e407a3fdb32745538730382/markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4", size = 14073, upload-time = "2025-09-27T18:37:17.476Z" }, + { url = "https://files.pythonhosted.org/packages/3c/f0/57689aa4076e1b43b15fdfa646b04653969d50cf30c32a102762be2485da/markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab", size = 11661, upload-time = "2025-09-27T18:37:18.453Z" }, + { url = "https://files.pythonhosted.org/packages/89/c3/2e67a7ca217c6912985ec766c6393b636fb0c2344443ff9d91404dc4c79f/markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175", size = 12069, upload-time = "2025-09-27T18:37:19.332Z" }, + { url = "https://files.pythonhosted.org/packages/f0/00/be561dce4e6ca66b15276e184ce4b8aec61fe83662cce2f7d72bd3249d28/markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634", size = 25670, upload-time = "2025-09-27T18:37:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/50/09/c419f6f5a92e5fadde27efd190eca90f05e1261b10dbd8cbcb39cd8ea1dc/markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50", size = 23598, upload-time = "2025-09-27T18:37:21.177Z" }, + { url = "https://files.pythonhosted.org/packages/22/44/a0681611106e0b2921b3033fc19bc53323e0b50bc70cffdd19f7d679bb66/markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e", size = 23261, upload-time = "2025-09-27T18:37:22.167Z" }, + { url = "https://files.pythonhosted.org/packages/5f/57/1b0b3f100259dc9fffe780cfb60d4be71375510e435efec3d116b6436d43/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5", size = 24835, upload-time = "2025-09-27T18:37:23.296Z" }, + { url = "https://files.pythonhosted.org/packages/26/6a/4bf6d0c97c4920f1597cc14dd720705eca0bf7c787aebc6bb4d1bead5388/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = 
"sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523", size = 22733, upload-time = "2025-09-27T18:37:24.237Z" }, + { url = "https://files.pythonhosted.org/packages/14/c7/ca723101509b518797fedc2fdf79ba57f886b4aca8a7d31857ba3ee8281f/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc", size = 23672, upload-time = "2025-09-27T18:37:25.271Z" }, + { url = "https://files.pythonhosted.org/packages/fb/df/5bd7a48c256faecd1d36edc13133e51397e41b73bb77e1a69deab746ebac/markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d", size = 14819, upload-time = "2025-09-27T18:37:26.285Z" }, + { url = "https://files.pythonhosted.org/packages/1a/8a/0402ba61a2f16038b48b39bccca271134be00c5c9f0f623208399333c448/markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9", size = 15426, upload-time = "2025-09-27T18:37:27.316Z" }, + { url = "https://files.pythonhosted.org/packages/70/bc/6f1c2f612465f5fa89b95bead1f44dcb607670fd42891d8fdcd5d039f4f4/markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa", size = 14146, upload-time = "2025-09-27T18:37:28.327Z" }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, +] + +[[package]] +name = "mergedeep" +version = "1.3.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3a/41/580bb4006e3ed0361b8151a01d324fb03f420815446c7def45d02f74c270/mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8", size = 4661, upload-time = "2021-02-05T18:55:30.623Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/19/04f9b178c2d8a15b076c8b5140708fa6ffc5601fb6f1e975537072df5b2a/mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307", size = 6354, upload-time = "2021-02-05T18:55:29.583Z" }, +] + +[[package]] +name = "mkdocs" +version = "1.6.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "ghp-import" }, + { name = "jinja2" }, + { name = "markdown" }, + { name = "markupsafe" }, + { name = "mergedeep" }, + { name = "mkdocs-get-deps" }, + { name = "packaging" }, + { name = "pathspec" }, + { name = "pyyaml" }, + { name = "pyyaml-env-tag" }, + { name = "watchdog" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bc/c6/bbd4f061bd16b378247f12953ffcb04786a618ce5e904b8c5a01a0309061/mkdocs-1.6.1.tar.gz", hash = "sha256:7b432f01d928c084353ab39c57282f29f92136665bdd6abf7c1ec8d822ef86f2", size = 3889159, upload-time = "2024-08-30T12:24:06.899Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/22/5b/dbc6a8cddc9cfa9c4971d59fb12bb8d42e161b7e7f8cc89e49137c5b279c/mkdocs-1.6.1-py3-none-any.whl", hash = "sha256:db91759624d1647f3f34aa0c3f327dd2601beae39a366d6e064c03468d35c20e", size = 3864451, upload-time = "2024-08-30T12:24:05.054Z" }, +] + +[[package]] +name = "mkdocs-autorefs" +version = "1.4.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown" }, + { name = "markupsafe" }, + { name = "mkdocs" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/51/fa/9124cd63d822e2bcbea1450ae68cdc3faf3655c69b455f3a7ed36ce6c628/mkdocs_autorefs-1.4.3.tar.gz", hash = "sha256:beee715b254455c4aa93b6ef3c67579c399ca092259cc41b7d9342573ff1fc75", size = 55425, upload-time = "2025-08-26T14:23:17.223Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9f/4d/7123b6fa2278000688ebd338e2a06d16870aaf9eceae6ba047ea05f92df1/mkdocs_autorefs-1.4.3-py3-none-any.whl", hash = "sha256:469d85eb3114801d08e9cc55d102b3ba65917a869b893403b8987b601cf55dc9", size = 25034, upload-time = "2025-08-26T14:23:15.906Z" }, +] + +[[package]] +name = "mkdocs-awesome-pages-plugin" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mkdocs" }, + { name = "natsort" }, + { name = "wcmatch" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/92/e8/6ae9c18d8174a5d74ce4ade7a7f4c350955063968bc41ff1e5833cff4a2b/mkdocs_awesome_pages_plugin-2.10.1.tar.gz", hash = "sha256:cda2cb88c937ada81a4785225f20ef77ce532762f4500120b67a1433c1cdbb2f", size = 16303, upload-time = "2024-12-22T21:13:49.19Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/61/19fc1e9c579dbfd4e8a402748f1d63cab7aabe8f8d91eb0235e45b32d040/mkdocs_awesome_pages_plugin-2.10.1-py3-none-any.whl", hash = "sha256:c6939dbea37383fc3cf8c0a4e892144ec3d2f8a585e16fdc966b34e7c97042a7", size = 15118, upload-time = "2024-12-22T21:13:46.945Z" }, +] + +[[package]] +name = "mkdocs-get-deps" +version = "0.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mergedeep" }, + { name = "platformdirs" }, + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/98/f5/ed29cd50067784976f25ed0ed6fcd3c2ce9eb90650aa3b2796ddf7b6870b/mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c", size = 10239, upload-time = "2023-11-20T17:51:09.981Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9f/d4/029f984e8d3f3b6b726bd33cafc473b75e9e44c0f7e80a5b29abc466bdea/mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134", size = 9521, upload-time = "2023-11-20T17:51:08.587Z" }, +] + +[[package]] +name = "mkdocs-material" +version = "9.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "babel" }, + { name = "backrefs" }, + { name = "colorama" }, + { name = "jinja2" }, + { name = "markdown" }, + { name = "mkdocs" }, + { name = "mkdocs-material-extensions" }, + { name = "paginate" }, + { name = "pygments" }, + { name = "pymdown-extensions" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/27/e2/2ffc356cd72f1473d07c7719d82a8f2cbd261666828614ecb95b12169f41/mkdocs_material-9.7.1.tar.gz", hash = "sha256:89601b8f2c3e6c6ee0a918cc3566cb201d40bf37c3cd3c2067e26fadb8cce2b8", size = 4094392, upload-time = "2025-12-18T09:49:00.308Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/3e/32/ed071cb721aca8c227718cffcf7bd539620e9799bbf2619e90c757bfd030/mkdocs_material-9.7.1-py3-none-any.whl", hash = "sha256:3f6100937d7d731f87f1e3e3b021c97f7239666b9ba1151ab476cabb96c60d5c", size = 9297166, upload-time = "2025-12-18T09:48:56.664Z" }, +] + +[[package]] +name = "mkdocs-material-extensions" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/79/9b/9b4c96d6593b2a541e1cb8b34899a6d021d208bb357042823d4d2cabdbe7/mkdocs_material_extensions-1.3.1.tar.gz", hash = "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443", size = 11847, upload-time = "2023-11-22T19:09:45.208Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5b/54/662a4743aa81d9582ee9339d4ffa3c8fd40a4965e033d77b9da9774d3960/mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31", size = 8728, upload-time = "2023-11-22T19:09:43.465Z" }, +] + +[[package]] +name = "mkdocs-mermaid2-plugin" +version = "1.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "beautifulsoup4" }, + { name = "jsbeautifier" }, + { name = "mkdocs" }, + { name = "pymdown-extensions" }, + { name = "requests" }, + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2a/6d/308f443a558b6a97ce55782658174c0d07c414405cfc0a44d36ad37e36f9/mkdocs_mermaid2_plugin-1.2.3.tar.gz", hash = "sha256:fb6f901d53e5191e93db78f93f219cad926ccc4d51e176271ca5161b6cc5368c", size = 16220, upload-time = "2025-10-17T19:38:53.047Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1a/4b/6fd6dd632019b7f522f1b1f794ab6115cd79890330986614be56fd18f0eb/mkdocs_mermaid2_plugin-1.2.3-py3-none-any.whl", hash = "sha256:33f60c582be623ed53829a96e19284fc7f1b74a1dbae78d4d2e47fe00c3e190d", size = 17299, upload-time = "2025-10-17T19:38:51.874Z" }, +] + +[[package]] +name = "mkdocstrings" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jinja2" }, + { name = "markdown" }, + { name = "markupsafe" }, + { name = "mkdocs" }, + { name = "mkdocs-autorefs" }, + { name = "pymdown-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/13/10bbf9d56565fd91b91e6f5a8cd9b9d8a2b101c4e8ad6eeafa35a706301d/mkdocstrings-1.0.0.tar.gz", hash = "sha256:351a006dbb27aefce241ade110d3cd040c1145b7a3eb5fd5ac23f03ed67f401a", size = 101086, upload-time = "2025-11-27T15:39:40.534Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/fc/80aa31b79133634721cf7855d37b76ea49773599214896f2ff10be03de2a/mkdocstrings-1.0.0-py3-none-any.whl", hash = "sha256:4c50eb960bff6e05dfc631f6bc00dfabffbcb29c5ff25f676d64daae05ed82fa", size = 35135, upload-time = "2025-11-27T15:39:39.301Z" }, +] + +[[package]] +name = "mkdocstrings-python" +version = "2.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "griffe" }, + { name = "mkdocs-autorefs" }, + { name = "mkdocstrings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/24/75/d30af27a2906f00eb90143470272376d728521997800f5dce5b340ba35bc/mkdocstrings_python-2.0.1.tar.gz", hash = "sha256:843a562221e6a471fefdd4b45cc6c22d2607ccbad632879234fa9692e9cf7732", size = 199345, upload-time = "2025-12-03T14:26:11.755Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/81/06/c5f8deba7d2cbdfa7967a716ae801aa9ca5f734b8f54fd473ef77a088dbe/mkdocstrings_python-2.0.1-py3-none-any.whl", hash = "sha256:66ecff45c5f8b71bf174e11d49afc845c2dfc7fc0ab17a86b6b337e0f24d8d90", size = 105055, upload-time = "2025-12-03T14:26:10.184Z" }, +] + +[[package]] +name = "more-itertools" +version = "10.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ea/5d/38b681d3fce7a266dd9ab73c66959406d565b3e85f21d5e66e1181d93721/more_itertools-10.8.0.tar.gz", hash = "sha256:f638ddf8a1a0d134181275fb5d58b086ead7c6a72429ad725c67503f13ba30bd", size = 137431, upload-time = "2025-09-02T15:23:11.018Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/8e/469e5a4a2f5855992e425f3cb33804cc07bf18d48f2db061aec61ce50270/more_itertools-10.8.0-py3-none-any.whl", hash = "sha256:52d4362373dcf7c52546bc4af9a86ee7c4579df9a8dc268be0a2f949d376cc9b", size = 69667, upload-time = "2025-09-02T15:23:09.635Z" }, +] + +[[package]] +name = "multidict" +version = "6.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/80/1e/5492c365f222f907de1039b91f922b93fa4f764c713ee858d235495d8f50/multidict-6.7.0.tar.gz", hash = "sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5", size = 101834, upload-time = "2025-10-06T14:52:30.657Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/9e/9f61ac18d9c8b475889f32ccfa91c9f59363480613fc807b6e3023d6f60b/multidict-6.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8a3862568a36d26e650a19bb5cbbba14b71789032aebc0423f8cc5f150730184", size = 76877, upload-time = "2025-10-06T14:49:20.884Z" }, + { url = "https://files.pythonhosted.org/packages/38/6f/614f09a04e6184f8824268fce4bc925e9849edfa654ddd59f0b64508c595/multidict-6.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:960c60b5849b9b4f9dcc9bea6e3626143c252c74113df2c1540aebce70209b45", size = 45467, upload-time = "2025-10-06T14:49:22.054Z" }, + { url = "https://files.pythonhosted.org/packages/b3/93/c4f67a436dd026f2e780c433277fff72be79152894d9fc36f44569cab1a6/multidict-6.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2049be98fb57a31b4ccf870bf377af2504d4ae35646a19037ec271e4c07998aa", size = 43834, upload-time = "2025-10-06T14:49:23.566Z" }, + { url = "https://files.pythonhosted.org/packages/7f/f5/013798161ca665e4a422afbc5e2d9e4070142a9ff8905e482139cd09e4d0/multidict-6.7.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0934f3843a1860dd465d38895c17fce1f1cb37295149ab05cd1b9a03afacb2a7", size = 250545, upload-time = "2025-10-06T14:49:24.882Z" }, + { url = "https://files.pythonhosted.org/packages/71/2f/91dbac13e0ba94669ea5119ba267c9a832f0cb65419aca75549fcf09a3dc/multidict-6.7.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b3e34f3a1b8131ba06f1a73adab24f30934d148afcd5f5de9a73565a4404384e", size = 258305, upload-time = "2025-10-06T14:49:26.778Z" }, + { url = "https://files.pythonhosted.org/packages/ef/b0/754038b26f6e04488b48ac621f779c341338d78503fb45403755af2df477/multidict-6.7.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:efbb54e98446892590dc2458c19c10344ee9a883a79b5cec4bc34d6656e8d546", size = 242363, upload-time = "2025-10-06T14:49:28.562Z" }, + { url = 
"https://files.pythonhosted.org/packages/87/15/9da40b9336a7c9fa606c4cf2ed80a649dffeb42b905d4f63a1d7eb17d746/multidict-6.7.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a35c5fc61d4f51eb045061e7967cfe3123d622cd500e8868e7c0c592a09fedc4", size = 268375, upload-time = "2025-10-06T14:49:29.96Z" }, + { url = "https://files.pythonhosted.org/packages/82/72/c53fcade0cc94dfaad583105fd92b3a783af2091eddcb41a6d5a52474000/multidict-6.7.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:29fe6740ebccba4175af1b9b87bf553e9c15cd5868ee967e010efcf94e4fd0f1", size = 269346, upload-time = "2025-10-06T14:49:31.404Z" }, + { url = "https://files.pythonhosted.org/packages/0d/e2/9baffdae21a76f77ef8447f1a05a96ec4bc0a24dae08767abc0a2fe680b8/multidict-6.7.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:123e2a72e20537add2f33a79e605f6191fba2afda4cbb876e35c1a7074298a7d", size = 256107, upload-time = "2025-10-06T14:49:32.974Z" }, + { url = "https://files.pythonhosted.org/packages/3c/06/3f06f611087dc60d65ef775f1fb5aca7c6d61c6db4990e7cda0cef9b1651/multidict-6.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b284e319754366c1aee2267a2036248b24eeb17ecd5dc16022095e747f2f4304", size = 253592, upload-time = "2025-10-06T14:49:34.52Z" }, + { url = "https://files.pythonhosted.org/packages/20/24/54e804ec7945b6023b340c412ce9c3f81e91b3bf5fa5ce65558740141bee/multidict-6.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:803d685de7be4303b5a657b76e2f6d1240e7e0a8aa2968ad5811fa2285553a12", size = 251024, upload-time = "2025-10-06T14:49:35.956Z" }, + { url = "https://files.pythonhosted.org/packages/14/48/011cba467ea0b17ceb938315d219391d3e421dfd35928e5dbdc3f4ae76ef/multidict-6.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c04a328260dfd5db8c39538f999f02779012268f54614902d0afc775d44e0a62", size = 251484, upload-time = "2025-10-06T14:49:37.631Z" }, + { url = "https://files.pythonhosted.org/packages/0d/2f/919258b43bb35b99fa127435cfb2d91798eb3a943396631ef43e3720dcf4/multidict-6.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8a19cdb57cd3df4cd865849d93ee14920fb97224300c88501f16ecfa2604b4e0", size = 263579, upload-time = "2025-10-06T14:49:39.502Z" }, + { url = "https://files.pythonhosted.org/packages/31/22/a0e884d86b5242b5a74cf08e876bdf299e413016b66e55511f7a804a366e/multidict-6.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b2fd74c52accced7e75de26023b7dccee62511a600e62311b918ec5c168fc2a", size = 259654, upload-time = "2025-10-06T14:49:41.32Z" }, + { url = "https://files.pythonhosted.org/packages/b2/e5/17e10e1b5c5f5a40f2fcbb45953c9b215f8a4098003915e46a93f5fcaa8f/multidict-6.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3e8bfdd0e487acf992407a140d2589fe598238eaeffa3da8448d63a63cd363f8", size = 251511, upload-time = "2025-10-06T14:49:46.021Z" }, + { url = "https://files.pythonhosted.org/packages/e3/9a/201bb1e17e7af53139597069c375e7b0dcbd47594604f65c2d5359508566/multidict-6.7.0-cp312-cp312-win32.whl", hash = "sha256:dd32a49400a2c3d52088e120ee00c1e3576cbff7e10b98467962c74fdb762ed4", size = 41895, upload-time = "2025-10-06T14:49:48.718Z" }, + { url = "https://files.pythonhosted.org/packages/46/e2/348cd32faad84eaf1d20cce80e2bb0ef8d312c55bca1f7fa9865e7770aaf/multidict-6.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:92abb658ef2d7ef22ac9f8bb88e8b6c3e571671534e029359b6d9e845923eb1b", size = 46073, upload-time = "2025-10-06T14:49:50.28Z" }, + { url = 
"https://files.pythonhosted.org/packages/25/ec/aad2613c1910dce907480e0c3aa306905830f25df2e54ccc9dea450cb5aa/multidict-6.7.0-cp312-cp312-win_arm64.whl", hash = "sha256:490dab541a6a642ce1a9d61a4781656b346a55c13038f0b1244653828e3a83ec", size = 43226, upload-time = "2025-10-06T14:49:52.304Z" }, + { url = "https://files.pythonhosted.org/packages/d2/86/33272a544eeb36d66e4d9a920602d1a2f57d4ebea4ef3cdfe5a912574c95/multidict-6.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bee7c0588aa0076ce77c0ea5d19a68d76ad81fcd9fe8501003b9a24f9d4000f6", size = 76135, upload-time = "2025-10-06T14:49:54.26Z" }, + { url = "https://files.pythonhosted.org/packages/91/1c/eb97db117a1ebe46d457a3d235a7b9d2e6dcab174f42d1b67663dd9e5371/multidict-6.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7ef6b61cad77091056ce0e7ce69814ef72afacb150b7ac6a3e9470def2198159", size = 45117, upload-time = "2025-10-06T14:49:55.82Z" }, + { url = "https://files.pythonhosted.org/packages/f1/d8/6c3442322e41fb1dd4de8bd67bfd11cd72352ac131f6368315617de752f1/multidict-6.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c0359b1ec12b1d6849c59f9d319610b7f20ef990a6d454ab151aa0e3b9f78ca", size = 43472, upload-time = "2025-10-06T14:49:57.048Z" }, + { url = "https://files.pythonhosted.org/packages/75/3f/e2639e80325af0b6c6febdf8e57cc07043ff15f57fa1ef808f4ccb5ac4cd/multidict-6.7.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cd240939f71c64bd658f186330603aac1a9a81bf6273f523fca63673cb7378a8", size = 249342, upload-time = "2025-10-06T14:49:58.368Z" }, + { url = "https://files.pythonhosted.org/packages/5d/cc/84e0585f805cbeaa9cbdaa95f9a3d6aed745b9d25700623ac89a6ecff400/multidict-6.7.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a60a4d75718a5efa473ebd5ab685786ba0c67b8381f781d1be14da49f1a2dc60", size = 257082, upload-time = "2025-10-06T14:49:59.89Z" }, + { url = "https://files.pythonhosted.org/packages/b0/9c/ac851c107c92289acbbf5cfb485694084690c1b17e555f44952c26ddc5bd/multidict-6.7.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53a42d364f323275126aff81fb67c5ca1b7a04fda0546245730a55c8c5f24bc4", size = 240704, upload-time = "2025-10-06T14:50:01.485Z" }, + { url = "https://files.pythonhosted.org/packages/50/cc/5f93e99427248c09da95b62d64b25748a5f5c98c7c2ab09825a1d6af0e15/multidict-6.7.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3b29b980d0ddbecb736735ee5bef69bb2ddca56eff603c86f3f29a1128299b4f", size = 266355, upload-time = "2025-10-06T14:50:02.955Z" }, + { url = "https://files.pythonhosted.org/packages/ec/0c/2ec1d883ceb79c6f7f6d7ad90c919c898f5d1c6ea96d322751420211e072/multidict-6.7.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f8a93b1c0ed2d04b97a5e9336fd2d33371b9a6e29ab7dd6503d63407c20ffbaf", size = 267259, upload-time = "2025-10-06T14:50:04.446Z" }, + { url = "https://files.pythonhosted.org/packages/c6/2d/f0b184fa88d6630aa267680bdb8623fb69cb0d024b8c6f0d23f9a0f406d3/multidict-6.7.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ff96e8815eecacc6645da76c413eb3b3d34cfca256c70b16b286a687d013c32", size = 254903, upload-time = "2025-10-06T14:50:05.98Z" }, + { url = "https://files.pythonhosted.org/packages/06/c9/11ea263ad0df7dfabcad404feb3c0dd40b131bc7f232d5537f2fb1356951/multidict-6.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:7516c579652f6a6be0e266aec0acd0db80829ca305c3d771ed898538804c2036", size = 252365, upload-time = "2025-10-06T14:50:07.511Z" }, + { url = "https://files.pythonhosted.org/packages/41/88/d714b86ee2c17d6e09850c70c9d310abac3d808ab49dfa16b43aba9d53fd/multidict-6.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:040f393368e63fb0f3330e70c26bfd336656bed925e5cbe17c9da839a6ab13ec", size = 250062, upload-time = "2025-10-06T14:50:09.074Z" }, + { url = "https://files.pythonhosted.org/packages/15/fe/ad407bb9e818c2b31383f6131ca19ea7e35ce93cf1310fce69f12e89de75/multidict-6.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b3bc26a951007b1057a1c543af845f1c7e3e71cc240ed1ace7bf4484aa99196e", size = 249683, upload-time = "2025-10-06T14:50:10.714Z" }, + { url = "https://files.pythonhosted.org/packages/8c/a4/a89abdb0229e533fb925e7c6e5c40201c2873efebc9abaf14046a4536ee6/multidict-6.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7b022717c748dd1992a83e219587aabe45980d88969f01b316e78683e6285f64", size = 261254, upload-time = "2025-10-06T14:50:12.28Z" }, + { url = "https://files.pythonhosted.org/packages/8d/aa/0e2b27bd88b40a4fb8dc53dd74eecac70edaa4c1dd0707eb2164da3675b3/multidict-6.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:9600082733859f00d79dee64effc7aef1beb26adb297416a4ad2116fd61374bd", size = 257967, upload-time = "2025-10-06T14:50:14.16Z" }, + { url = "https://files.pythonhosted.org/packages/d0/8e/0c67b7120d5d5f6d874ed85a085f9dc770a7f9d8813e80f44a9fec820bb7/multidict-6.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:94218fcec4d72bc61df51c198d098ce2b378e0ccbac41ddbed5ef44092913288", size = 250085, upload-time = "2025-10-06T14:50:15.639Z" }, + { url = "https://files.pythonhosted.org/packages/ba/55/b73e1d624ea4b8fd4dd07a3bb70f6e4c7c6c5d9d640a41c6ffe5cdbd2a55/multidict-6.7.0-cp313-cp313-win32.whl", hash = "sha256:a37bd74c3fa9d00be2d7b8eca074dc56bd8077ddd2917a839bd989612671ed17", size = 41713, upload-time = "2025-10-06T14:50:17.066Z" }, + { url = "https://files.pythonhosted.org/packages/32/31/75c59e7d3b4205075b4c183fa4ca398a2daf2303ddf616b04ae6ef55cffe/multidict-6.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:30d193c6cc6d559db42b6bcec8a5d395d34d60c9877a0b71ecd7c204fcf15390", size = 45915, upload-time = "2025-10-06T14:50:18.264Z" }, + { url = "https://files.pythonhosted.org/packages/31/2a/8987831e811f1184c22bc2e45844934385363ee61c0a2dcfa8f71b87e608/multidict-6.7.0-cp313-cp313-win_arm64.whl", hash = "sha256:ea3334cabe4d41b7ccd01e4d349828678794edbc2d3ae97fc162a3312095092e", size = 43077, upload-time = "2025-10-06T14:50:19.853Z" }, + { url = "https://files.pythonhosted.org/packages/e8/68/7b3a5170a382a340147337b300b9eb25a9ddb573bcdfff19c0fa3f31ffba/multidict-6.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:ad9ce259f50abd98a1ca0aa6e490b58c316a0fce0617f609723e40804add2c00", size = 83114, upload-time = "2025-10-06T14:50:21.223Z" }, + { url = "https://files.pythonhosted.org/packages/55/5c/3fa2d07c84df4e302060f555bbf539310980362236ad49f50eeb0a1c1eb9/multidict-6.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07f5594ac6d084cbb5de2df218d78baf55ef150b91f0ff8a21cc7a2e3a5a58eb", size = 48442, upload-time = "2025-10-06T14:50:22.871Z" }, + { url = "https://files.pythonhosted.org/packages/fc/56/67212d33239797f9bd91962bb899d72bb0f4c35a8652dcdb8ed049bef878/multidict-6.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0591b48acf279821a579282444814a2d8d0af624ae0bc600aa4d1b920b6e924b", size = 46885, upload-time = "2025-10-06T14:50:24.258Z" }, + { url = 
"https://files.pythonhosted.org/packages/46/d1/908f896224290350721597a61a69cd19b89ad8ee0ae1f38b3f5cd12ea2ac/multidict-6.7.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:749a72584761531d2b9467cfbdfd29487ee21124c304c4b6cb760d8777b27f9c", size = 242588, upload-time = "2025-10-06T14:50:25.716Z" }, + { url = "https://files.pythonhosted.org/packages/ab/67/8604288bbd68680eee0ab568fdcb56171d8b23a01bcd5cb0c8fedf6e5d99/multidict-6.7.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b4c3d199f953acd5b446bf7c0de1fe25d94e09e79086f8dc2f48a11a129cdf1", size = 249966, upload-time = "2025-10-06T14:50:28.192Z" }, + { url = "https://files.pythonhosted.org/packages/20/33/9228d76339f1ba51e3efef7da3ebd91964d3006217aae13211653193c3ff/multidict-6.7.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9fb0211dfc3b51efea2f349ec92c114d7754dd62c01f81c3e32b765b70c45c9b", size = 228618, upload-time = "2025-10-06T14:50:29.82Z" }, + { url = "https://files.pythonhosted.org/packages/f8/2d/25d9b566d10cab1c42b3b9e5b11ef79c9111eaf4463b8c257a3bd89e0ead/multidict-6.7.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a027ec240fe73a8d6281872690b988eed307cd7d91b23998ff35ff577ca688b5", size = 257539, upload-time = "2025-10-06T14:50:31.731Z" }, + { url = "https://files.pythonhosted.org/packages/b6/b1/8d1a965e6637fc33de3c0d8f414485c2b7e4af00f42cab3d84e7b955c222/multidict-6.7.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1d964afecdf3a8288789df2f5751dc0a8261138c3768d9af117ed384e538fad", size = 256345, upload-time = "2025-10-06T14:50:33.26Z" }, + { url = "https://files.pythonhosted.org/packages/ba/0c/06b5a8adbdeedada6f4fb8d8f193d44a347223b11939b42953eeb6530b6b/multidict-6.7.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:caf53b15b1b7df9fbd0709aa01409000a2b4dd03a5f6f5cc548183c7c8f8b63c", size = 247934, upload-time = "2025-10-06T14:50:34.808Z" }, + { url = "https://files.pythonhosted.org/packages/8f/31/b2491b5fe167ca044c6eb4b8f2c9f3b8a00b24c432c365358eadac5d7625/multidict-6.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:654030da3197d927f05a536a66186070e98765aa5142794c9904555d3a9d8fb5", size = 245243, upload-time = "2025-10-06T14:50:36.436Z" }, + { url = "https://files.pythonhosted.org/packages/61/1a/982913957cb90406c8c94f53001abd9eafc271cb3e70ff6371590bec478e/multidict-6.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:2090d3718829d1e484706a2f525e50c892237b2bf9b17a79b059cb98cddc2f10", size = 235878, upload-time = "2025-10-06T14:50:37.953Z" }, + { url = "https://files.pythonhosted.org/packages/be/c0/21435d804c1a1cf7a2608593f4d19bca5bcbd7a81a70b253fdd1c12af9c0/multidict-6.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2d2cfeec3f6f45651b3d408c4acec0ebf3daa9bc8a112a084206f5db5d05b754", size = 243452, upload-time = "2025-10-06T14:50:39.574Z" }, + { url = "https://files.pythonhosted.org/packages/54/0a/4349d540d4a883863191be6eb9a928846d4ec0ea007d3dcd36323bb058ac/multidict-6.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:4ef089f985b8c194d341eb2c24ae6e7408c9a0e2e5658699c92f497437d88c3c", size = 252312, upload-time = "2025-10-06T14:50:41.612Z" }, + { url = 
"https://files.pythonhosted.org/packages/26/64/d5416038dbda1488daf16b676e4dbfd9674dde10a0cc8f4fc2b502d8125d/multidict-6.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e93a0617cd16998784bf4414c7e40f17a35d2350e5c6f0bd900d3a8e02bd3762", size = 246935, upload-time = "2025-10-06T14:50:43.972Z" }, + { url = "https://files.pythonhosted.org/packages/9f/8c/8290c50d14e49f35e0bd4abc25e1bc7711149ca9588ab7d04f886cdf03d9/multidict-6.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f0feece2ef8ebc42ed9e2e8c78fc4aa3cf455733b507c09ef7406364c94376c6", size = 243385, upload-time = "2025-10-06T14:50:45.648Z" }, + { url = "https://files.pythonhosted.org/packages/ef/a0/f83ae75e42d694b3fbad3e047670e511c138be747bc713cf1b10d5096416/multidict-6.7.0-cp313-cp313t-win32.whl", hash = "sha256:19a1d55338ec1be74ef62440ca9e04a2f001a04d0cc49a4983dc320ff0f3212d", size = 47777, upload-time = "2025-10-06T14:50:47.154Z" }, + { url = "https://files.pythonhosted.org/packages/dc/80/9b174a92814a3830b7357307a792300f42c9e94664b01dee8e457551fa66/multidict-6.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3da4fb467498df97e986af166b12d01f05d2e04f978a9c1c680ea1988e0bc4b6", size = 53104, upload-time = "2025-10-06T14:50:48.851Z" }, + { url = "https://files.pythonhosted.org/packages/cc/28/04baeaf0428d95bb7a7bea0e691ba2f31394338ba424fb0679a9ed0f4c09/multidict-6.7.0-cp313-cp313t-win_arm64.whl", hash = "sha256:b4121773c49a0776461f4a904cdf6264c88e42218aaa8407e803ca8025872792", size = 45503, upload-time = "2025-10-06T14:50:50.16Z" }, + { url = "https://files.pythonhosted.org/packages/e2/b1/3da6934455dd4b261d4c72f897e3a5728eba81db59959f3a639245891baa/multidict-6.7.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3bab1e4aff7adaa34410f93b1f8e57c4b36b9af0426a76003f441ee1d3c7e842", size = 75128, upload-time = "2025-10-06T14:50:51.92Z" }, + { url = "https://files.pythonhosted.org/packages/14/2c/f069cab5b51d175a1a2cb4ccdf7a2c2dabd58aa5bd933fa036a8d15e2404/multidict-6.7.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b8512bac933afc3e45fb2b18da8e59b78d4f408399a960339598374d4ae3b56b", size = 44410, upload-time = "2025-10-06T14:50:53.275Z" }, + { url = "https://files.pythonhosted.org/packages/42/e2/64bb41266427af6642b6b128e8774ed84c11b80a90702c13ac0a86bb10cc/multidict-6.7.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:79dcf9e477bc65414ebfea98ffd013cb39552b5ecd62908752e0e413d6d06e38", size = 43205, upload-time = "2025-10-06T14:50:54.911Z" }, + { url = "https://files.pythonhosted.org/packages/02/68/6b086fef8a3f1a8541b9236c594f0c9245617c29841f2e0395d979485cde/multidict-6.7.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:31bae522710064b5cbeddaf2e9f32b1abab70ac6ac91d42572502299e9953128", size = 245084, upload-time = "2025-10-06T14:50:56.369Z" }, + { url = "https://files.pythonhosted.org/packages/15/ee/f524093232007cd7a75c1d132df70f235cfd590a7c9eaccd7ff422ef4ae8/multidict-6.7.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a0df7ff02397bb63e2fd22af2c87dfa39e8c7f12947bc524dbdc528282c7e34", size = 252667, upload-time = "2025-10-06T14:50:57.991Z" }, + { url = "https://files.pythonhosted.org/packages/02/a5/eeb3f43ab45878f1895118c3ef157a480db58ede3f248e29b5354139c2c9/multidict-6.7.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7a0222514e8e4c514660e182d5156a415c13ef0aabbd71682fc714e327b95e99", size = 233590, upload-time = "2025-10-06T14:50:59.589Z" }, + { url = 
"https://files.pythonhosted.org/packages/6a/1e/76d02f8270b97269d7e3dbd45644b1785bda457b474315f8cf999525a193/multidict-6.7.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2397ab4daaf2698eb51a76721e98db21ce4f52339e535725de03ea962b5a3202", size = 264112, upload-time = "2025-10-06T14:51:01.183Z" }, + { url = "https://files.pythonhosted.org/packages/76/0b/c28a70ecb58963847c2a8efe334904cd254812b10e535aefb3bcce513918/multidict-6.7.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8891681594162635948a636c9fe0ff21746aeb3dd5463f6e25d9bea3a8a39ca1", size = 261194, upload-time = "2025-10-06T14:51:02.794Z" }, + { url = "https://files.pythonhosted.org/packages/b4/63/2ab26e4209773223159b83aa32721b4021ffb08102f8ac7d689c943fded1/multidict-6.7.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18706cc31dbf402a7945916dd5cddf160251b6dab8a2c5f3d6d5a55949f676b3", size = 248510, upload-time = "2025-10-06T14:51:04.724Z" }, + { url = "https://files.pythonhosted.org/packages/93/cd/06c1fa8282af1d1c46fd55c10a7930af652afdce43999501d4d68664170c/multidict-6.7.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f844a1bbf1d207dd311a56f383f7eda2d0e134921d45751842d8235e7778965d", size = 248395, upload-time = "2025-10-06T14:51:06.306Z" }, + { url = "https://files.pythonhosted.org/packages/99/ac/82cb419dd6b04ccf9e7e61befc00c77614fc8134362488b553402ecd55ce/multidict-6.7.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:d4393e3581e84e5645506923816b9cc81f5609a778c7e7534054091acc64d1c6", size = 239520, upload-time = "2025-10-06T14:51:08.091Z" }, + { url = "https://files.pythonhosted.org/packages/fa/f3/a0f9bf09493421bd8716a362e0cd1d244f5a6550f5beffdd6b47e885b331/multidict-6.7.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:fbd18dc82d7bf274b37aa48d664534330af744e03bccf696d6f4c6042e7d19e7", size = 245479, upload-time = "2025-10-06T14:51:10.365Z" }, + { url = "https://files.pythonhosted.org/packages/8d/01/476d38fc73a212843f43c852b0eee266b6971f0e28329c2184a8df90c376/multidict-6.7.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:b6234e14f9314731ec45c42fc4554b88133ad53a09092cc48a88e771c125dadb", size = 258903, upload-time = "2025-10-06T14:51:12.466Z" }, + { url = "https://files.pythonhosted.org/packages/49/6d/23faeb0868adba613b817d0e69c5f15531b24d462af8012c4f6de4fa8dc3/multidict-6.7.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:08d4379f9744d8f78d98c8673c06e202ffa88296f009c71bbafe8a6bf847d01f", size = 252333, upload-time = "2025-10-06T14:51:14.48Z" }, + { url = "https://files.pythonhosted.org/packages/1e/cc/48d02ac22b30fa247f7dad82866e4b1015431092f4ba6ebc7e77596e0b18/multidict-6.7.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9fe04da3f79387f450fd0061d4dd2e45a72749d31bf634aecc9e27f24fdc4b3f", size = 243411, upload-time = "2025-10-06T14:51:16.072Z" }, + { url = "https://files.pythonhosted.org/packages/4a/03/29a8bf5a18abf1fe34535c88adbdfa88c9fb869b5a3b120692c64abe8284/multidict-6.7.0-cp314-cp314-win32.whl", hash = "sha256:fbafe31d191dfa7c4c51f7a6149c9fb7e914dcf9ffead27dcfd9f1ae382b3885", size = 40940, upload-time = "2025-10-06T14:51:17.544Z" }, + { url = "https://files.pythonhosted.org/packages/82/16/7ed27b680791b939de138f906d5cf2b4657b0d45ca6f5dd6236fdddafb1a/multidict-6.7.0-cp314-cp314-win_amd64.whl", hash = "sha256:2f67396ec0310764b9222a1728ced1ab638f61aadc6226f17a71dd9324f9a99c", size = 45087, upload-time = "2025-10-06T14:51:18.875Z" }, + { url 
= "https://files.pythonhosted.org/packages/cd/3c/e3e62eb35a1950292fe39315d3c89941e30a9d07d5d2df42965ab041da43/multidict-6.7.0-cp314-cp314-win_arm64.whl", hash = "sha256:ba672b26069957ee369cfa7fc180dde1fc6f176eaf1e6beaf61fbebbd3d9c000", size = 42368, upload-time = "2025-10-06T14:51:20.225Z" }, + { url = "https://files.pythonhosted.org/packages/8b/40/cd499bd0dbc5f1136726db3153042a735fffd0d77268e2ee20d5f33c010f/multidict-6.7.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:c1dcc7524066fa918c6a27d61444d4ee7900ec635779058571f70d042d86ed63", size = 82326, upload-time = "2025-10-06T14:51:21.588Z" }, + { url = "https://files.pythonhosted.org/packages/13/8a/18e031eca251c8df76daf0288e6790561806e439f5ce99a170b4af30676b/multidict-6.7.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:27e0b36c2d388dc7b6ced3406671b401e84ad7eb0656b8f3a2f46ed0ce483718", size = 48065, upload-time = "2025-10-06T14:51:22.93Z" }, + { url = "https://files.pythonhosted.org/packages/40/71/5e6701277470a87d234e433fb0a3a7deaf3bcd92566e421e7ae9776319de/multidict-6.7.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2a7baa46a22e77f0988e3b23d4ede5513ebec1929e34ee9495be535662c0dfe2", size = 46475, upload-time = "2025-10-06T14:51:24.352Z" }, + { url = "https://files.pythonhosted.org/packages/fe/6a/bab00cbab6d9cfb57afe1663318f72ec28289ea03fd4e8236bb78429893a/multidict-6.7.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7bf77f54997a9166a2f5675d1201520586439424c2511723a7312bdb4bcc034e", size = 239324, upload-time = "2025-10-06T14:51:25.822Z" }, + { url = "https://files.pythonhosted.org/packages/2a/5f/8de95f629fc22a7769ade8b41028e3e5a822c1f8904f618d175945a81ad3/multidict-6.7.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e011555abada53f1578d63389610ac8a5400fc70ce71156b0aa30d326f1a5064", size = 246877, upload-time = "2025-10-06T14:51:27.604Z" }, + { url = "https://files.pythonhosted.org/packages/23/b4/38881a960458f25b89e9f4a4fdcb02ac101cfa710190db6e5528841e67de/multidict-6.7.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:28b37063541b897fd6a318007373930a75ca6d6ac7c940dbe14731ffdd8d498e", size = 225824, upload-time = "2025-10-06T14:51:29.664Z" }, + { url = "https://files.pythonhosted.org/packages/1e/39/6566210c83f8a261575f18e7144736059f0c460b362e96e9cf797a24b8e7/multidict-6.7.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:05047ada7a2fde2631a0ed706f1fd68b169a681dfe5e4cf0f8e4cb6618bbc2cd", size = 253558, upload-time = "2025-10-06T14:51:31.684Z" }, + { url = "https://files.pythonhosted.org/packages/00/a3/67f18315100f64c269f46e6c0319fa87ba68f0f64f2b8e7fd7c72b913a0b/multidict-6.7.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:716133f7d1d946a4e1b91b1756b23c088881e70ff180c24e864c26192ad7534a", size = 252339, upload-time = "2025-10-06T14:51:33.699Z" }, + { url = "https://files.pythonhosted.org/packages/c8/2a/1cb77266afee2458d82f50da41beba02159b1d6b1f7973afc9a1cad1499b/multidict-6.7.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d1bed1b467ef657f2a0ae62844a607909ef1c6889562de5e1d505f74457d0b96", size = 244895, upload-time = "2025-10-06T14:51:36.189Z" }, + { url = 
"https://files.pythonhosted.org/packages/dd/72/09fa7dd487f119b2eb9524946ddd36e2067c08510576d43ff68469563b3b/multidict-6.7.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ca43bdfa5d37bd6aee89d85e1d0831fb86e25541be7e9d376ead1b28974f8e5e", size = 241862, upload-time = "2025-10-06T14:51:41.291Z" }, + { url = "https://files.pythonhosted.org/packages/65/92/bc1f8bd0853d8669300f732c801974dfc3702c3eeadae2f60cef54dc69d7/multidict-6.7.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:44b546bd3eb645fd26fb949e43c02a25a2e632e2ca21a35e2e132c8105dc8599", size = 232376, upload-time = "2025-10-06T14:51:43.55Z" }, + { url = "https://files.pythonhosted.org/packages/09/86/ac39399e5cb9d0c2ac8ef6e10a768e4d3bc933ac808d49c41f9dc23337eb/multidict-6.7.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a6ef16328011d3f468e7ebc326f24c1445f001ca1dec335b2f8e66bed3006394", size = 240272, upload-time = "2025-10-06T14:51:45.265Z" }, + { url = "https://files.pythonhosted.org/packages/3d/b6/fed5ac6b8563ec72df6cb1ea8dac6d17f0a4a1f65045f66b6d3bf1497c02/multidict-6.7.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:5aa873cbc8e593d361ae65c68f85faadd755c3295ea2c12040ee146802f23b38", size = 248774, upload-time = "2025-10-06T14:51:46.836Z" }, + { url = "https://files.pythonhosted.org/packages/6b/8d/b954d8c0dc132b68f760aefd45870978deec6818897389dace00fcde32ff/multidict-6.7.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:3d7b6ccce016e29df4b7ca819659f516f0bc7a4b3efa3bb2012ba06431b044f9", size = 242731, upload-time = "2025-10-06T14:51:48.541Z" }, + { url = "https://files.pythonhosted.org/packages/16/9d/a2dac7009125d3540c2f54e194829ea18ac53716c61b655d8ed300120b0f/multidict-6.7.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:171b73bd4ee683d307599b66793ac80981b06f069b62eea1c9e29c9241aa66b0", size = 240193, upload-time = "2025-10-06T14:51:50.355Z" }, + { url = "https://files.pythonhosted.org/packages/39/ca/c05f144128ea232ae2178b008d5011d4e2cea86e4ee8c85c2631b1b94802/multidict-6.7.0-cp314-cp314t-win32.whl", hash = "sha256:b2d7f80c4e1fd010b07cb26820aae86b7e73b681ee4889684fb8d2d4537aab13", size = 48023, upload-time = "2025-10-06T14:51:51.883Z" }, + { url = "https://files.pythonhosted.org/packages/ba/8f/0a60e501584145588be1af5cc829265701ba3c35a64aec8e07cbb71d39bb/multidict-6.7.0-cp314-cp314t-win_amd64.whl", hash = "sha256:09929cab6fcb68122776d575e03c6cc64ee0b8fca48d17e135474b042ce515cd", size = 53507, upload-time = "2025-10-06T14:51:53.672Z" }, + { url = "https://files.pythonhosted.org/packages/7f/ae/3148b988a9c6239903e786eac19c889fab607c31d6efa7fb2147e5680f23/multidict-6.7.0-cp314-cp314t-win_arm64.whl", hash = "sha256:cc41db090ed742f32bd2d2c721861725e6109681eddf835d0a82bd3a5c382827", size = 44804, upload-time = "2025-10-06T14:51:55.415Z" }, + { url = "https://files.pythonhosted.org/packages/b7/da/7d22601b625e241d4f23ef1ebff8acfc60da633c9e7e7922e24d10f592b3/multidict-6.7.0-py3-none-any.whl", hash = "sha256:394fc5c42a333c9ffc3e421a4c85e08580d990e08b99f6bf35b4132114c5dcb3", size = 12317, upload-time = "2025-10-06T14:52:29.272Z" }, +] + +[[package]] +name = "mypy" +version = "1.19.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "librt", marker = "platform_python_implementation != 'PyPy'" }, + { name = "mypy-extensions" }, + { name = "pathspec" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f5/db/4efed9504bc01309ab9c2da7e352cc223569f05478012b5d9ece38fd44d2/mypy-1.19.1.tar.gz", hash = 
"sha256:19d88bb05303fe63f71dd2c6270daca27cb9401c4ca8255fe50d1d920e0eb9ba", size = 3582404, upload-time = "2025-12-15T05:03:48.42Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/06/8a/19bfae96f6615aa8a0604915512e0289b1fad33d5909bf7244f02935d33a/mypy-1.19.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a8174a03289288c1f6c46d55cef02379b478bfbc8e358e02047487cad44c6ca1", size = 13206053, upload-time = "2025-12-15T05:03:46.622Z" }, + { url = "https://files.pythonhosted.org/packages/a5/34/3e63879ab041602154ba2a9f99817bb0c85c4df19a23a1443c8986e4d565/mypy-1.19.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ffcebe56eb09ff0c0885e750036a095e23793ba6c2e894e7e63f6d89ad51f22e", size = 12219134, upload-time = "2025-12-15T05:03:24.367Z" }, + { url = "https://files.pythonhosted.org/packages/89/cc/2db6f0e95366b630364e09845672dbee0cbf0bbe753a204b29a944967cd9/mypy-1.19.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b64d987153888790bcdb03a6473d321820597ab8dd9243b27a92153c4fa50fd2", size = 12731616, upload-time = "2025-12-15T05:02:44.725Z" }, + { url = "https://files.pythonhosted.org/packages/00/be/dd56c1fd4807bc1eba1cf18b2a850d0de7bacb55e158755eb79f77c41f8e/mypy-1.19.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c35d298c2c4bba75feb2195655dfea8124d855dfd7343bf8b8c055421eaf0cf8", size = 13620847, upload-time = "2025-12-15T05:03:39.633Z" }, + { url = "https://files.pythonhosted.org/packages/6d/42/332951aae42b79329f743bf1da088cd75d8d4d9acc18fbcbd84f26c1af4e/mypy-1.19.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:34c81968774648ab5ac09c29a375fdede03ba253f8f8287847bd480782f73a6a", size = 13834976, upload-time = "2025-12-15T05:03:08.786Z" }, + { url = "https://files.pythonhosted.org/packages/6f/63/e7493e5f90e1e085c562bb06e2eb32cae27c5057b9653348d38b47daaecc/mypy-1.19.1-cp312-cp312-win_amd64.whl", hash = "sha256:b10e7c2cd7870ba4ad9b2d8a6102eb5ffc1f16ca35e3de6bfa390c1113029d13", size = 10118104, upload-time = "2025-12-15T05:03:10.834Z" }, + { url = "https://files.pythonhosted.org/packages/de/9f/a6abae693f7a0c697dbb435aac52e958dc8da44e92e08ba88d2e42326176/mypy-1.19.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e3157c7594ff2ef1634ee058aafc56a82db665c9438fd41b390f3bde1ab12250", size = 13201927, upload-time = "2025-12-15T05:02:29.138Z" }, + { url = "https://files.pythonhosted.org/packages/9a/a4/45c35ccf6e1c65afc23a069f50e2c66f46bd3798cbe0d680c12d12935caa/mypy-1.19.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bdb12f69bcc02700c2b47e070238f42cb87f18c0bc1fc4cdb4fb2bc5fd7a3b8b", size = 12206730, upload-time = "2025-12-15T05:03:01.325Z" }, + { url = "https://files.pythonhosted.org/packages/05/bb/cdcf89678e26b187650512620eec8368fded4cfd99cfcb431e4cdfd19dec/mypy-1.19.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f859fb09d9583a985be9a493d5cfc5515b56b08f7447759a0c5deaf68d80506e", size = 12724581, upload-time = "2025-12-15T05:03:20.087Z" }, + { url = "https://files.pythonhosted.org/packages/d1/32/dd260d52babf67bad8e6770f8e1102021877ce0edea106e72df5626bb0ec/mypy-1.19.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c9a6538e0415310aad77cb94004ca6482330fece18036b5f360b62c45814c4ef", size = 13616252, upload-time = "2025-12-15T05:02:49.036Z" }, + { url = 
"https://files.pythonhosted.org/packages/71/d0/5e60a9d2e3bd48432ae2b454b7ef2b62a960ab51292b1eda2a95edd78198/mypy-1.19.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:da4869fc5e7f62a88f3fe0b5c919d1d9f7ea3cef92d3689de2823fd27e40aa75", size = 13840848, upload-time = "2025-12-15T05:02:55.95Z" }, + { url = "https://files.pythonhosted.org/packages/98/76/d32051fa65ecf6cc8c6610956473abdc9b4c43301107476ac03559507843/mypy-1.19.1-cp313-cp313-win_amd64.whl", hash = "sha256:016f2246209095e8eda7538944daa1d60e1e8134d98983b9fc1e92c1fc0cb8dd", size = 10135510, upload-time = "2025-12-15T05:02:58.438Z" }, + { url = "https://files.pythonhosted.org/packages/de/eb/b83e75f4c820c4247a58580ef86fcd35165028f191e7e1ba57128c52782d/mypy-1.19.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:06e6170bd5836770e8104c8fdd58e5e725cfeb309f0a6c681a811f557e97eac1", size = 13199744, upload-time = "2025-12-15T05:03:30.823Z" }, + { url = "https://files.pythonhosted.org/packages/94/28/52785ab7bfa165f87fcbb61547a93f98bb20e7f82f90f165a1f69bce7b3d/mypy-1.19.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:804bd67b8054a85447c8954215a906d6eff9cabeabe493fb6334b24f4bfff718", size = 12215815, upload-time = "2025-12-15T05:02:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/0a/c6/bdd60774a0dbfb05122e3e925f2e9e846c009e479dcec4821dad881f5b52/mypy-1.19.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:21761006a7f497cb0d4de3d8ef4ca70532256688b0523eee02baf9eec895e27b", size = 12740047, upload-time = "2025-12-15T05:03:33.168Z" }, + { url = "https://files.pythonhosted.org/packages/32/2a/66ba933fe6c76bd40d1fe916a83f04fed253152f451a877520b3c4a5e41e/mypy-1.19.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:28902ee51f12e0f19e1e16fbe2f8f06b6637f482c459dd393efddd0ec7f82045", size = 13601998, upload-time = "2025-12-15T05:03:13.056Z" }, + { url = "https://files.pythonhosted.org/packages/e3/da/5055c63e377c5c2418760411fd6a63ee2b96cf95397259038756c042574f/mypy-1.19.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:481daf36a4c443332e2ae9c137dfee878fcea781a2e3f895d54bd3002a900957", size = 13807476, upload-time = "2025-12-15T05:03:17.977Z" }, + { url = "https://files.pythonhosted.org/packages/cd/09/4ebd873390a063176f06b0dbf1f7783dd87bd120eae7727fa4ae4179b685/mypy-1.19.1-cp314-cp314-win_amd64.whl", hash = "sha256:8bb5c6f6d043655e055be9b542aa5f3bdd30e4f3589163e85f93f3640060509f", size = 10281872, upload-time = "2025-12-15T05:03:05.549Z" }, + { url = "https://files.pythonhosted.org/packages/8d/f4/4ce9a05ce5ded1de3ec1c1d96cf9f9504a04e54ce0ed55cfa38619a32b8d/mypy-1.19.1-py3-none-any.whl", hash = "sha256:f1235f5ea01b7db5468d53ece6aaddf1ad0b88d9e7462b86ef96fe04995d7247", size = 2471239, upload-time = "2025-12-15T05:03:07.248Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = 
"2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "natsort" +version = "8.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e2/a9/a0c57aee75f77794adaf35322f8b6404cbd0f89ad45c87197a937764b7d0/natsort-8.4.0.tar.gz", hash = "sha256:45312c4a0e5507593da193dedd04abb1469253b601ecaf63445ad80f0a1ea581", size = 76575, upload-time = "2023-06-20T04:17:19.925Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/82/7a9d0550484a62c6da82858ee9419f3dd1ccc9aa1c26a1e43da3ecd20b0d/natsort-8.4.0-py3-none-any.whl", hash = "sha256:4732914fb471f56b5cce04d7bae6f164a592c7712e1c85f9ef585e197299521c", size = 38268, upload-time = "2023-06-20T04:17:17.522Z" }, +] + +[[package]] +name = "networkx" +version = "3.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/51/63fe664f3908c97be9d2e4f1158eb633317598cfa6e1fc14af5383f17512/networkx-3.6.1.tar.gz", hash = "sha256:26b7c357accc0c8cde558ad486283728b65b6a95d85ee1cd66bafab4c8168509", size = 2517025, upload-time = "2025-12-08T17:02:39.908Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/c9/b2622292ea83fbb4ec318f5b9ab867d0a28ab43c5717bb85b0a5f6b3b0a4/networkx-3.6.1-py3-none-any.whl", hash = "sha256:d47fbf302e7d9cbbb9e2555a0d267983d2aa476bac30e90dfbe5669bd57f3762", size = 2068504, upload-time = "2025-12-08T17:02:38.159Z" }, +] + +[[package]] +name = "nodeenv" +version = "1.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, +] + +[[package]] +name = "numpy" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a4/7a/6a3d14e205d292b738db449d0de649b373a59edb0d0b4493821d0a3e8718/numpy-2.4.0.tar.gz", hash = "sha256:6e504f7b16118198f138ef31ba24d985b124c2c469fe8467007cf30fd992f934", size = 20685720, upload-time = "2025-12-20T16:18:19.023Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8b/ff/f6400ffec95de41c74b8e73df32e3fff1830633193a7b1e409be7fb1bb8c/numpy-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2a8b6bb8369abefb8bd1801b054ad50e02b3275c8614dc6e5b0373c305291037", size = 16653117, upload-time = "2025-12-20T16:16:06.709Z" }, + { url = "https://files.pythonhosted.org/packages/fd/28/6c23e97450035072e8d830a3c411bf1abd1f42c611ff9d29e3d8f55c6252/numpy-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2e284ca13d5a8367e43734148622caf0b261b275673823593e3e3634a6490f83", size = 12369711, upload-time = "2025-12-20T16:16:08.758Z" }, + { url = "https://files.pythonhosted.org/packages/bc/af/acbef97b630ab1bb45e6a7d01d1452e4251aa88ce680ac36e56c272120ec/numpy-2.4.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:49ff32b09f5aa0cd30a20c2b39db3e669c845589f2b7fc910365210887e39344", size = 5198355, upload-time = "2025-12-20T16:16:10.902Z" }, + { url = 
"https://files.pythonhosted.org/packages/c1/c8/4e0d436b66b826f2e53330adaa6311f5cac9871a5b5c31ad773b27f25a74/numpy-2.4.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:36cbfb13c152b1c7c184ddac43765db8ad672567e7bafff2cc755a09917ed2e6", size = 6545298, upload-time = "2025-12-20T16:16:12.607Z" }, + { url = "https://files.pythonhosted.org/packages/ef/27/e1f5d144ab54eac34875e79037011d511ac57b21b220063310cb96c80fbc/numpy-2.4.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:35ddc8f4914466e6fc954c76527aa91aa763682a4f6d73249ef20b418fe6effb", size = 14398387, upload-time = "2025-12-20T16:16:14.257Z" }, + { url = "https://files.pythonhosted.org/packages/67/64/4cb909dd5ab09a9a5d086eff9586e69e827b88a5585517386879474f4cf7/numpy-2.4.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dc578891de1db95b2a35001b695451767b580bb45753717498213c5ff3c41d63", size = 16363091, upload-time = "2025-12-20T16:16:17.32Z" }, + { url = "https://files.pythonhosted.org/packages/9d/9c/8efe24577523ec6809261859737cf117b0eb6fdb655abdfdc81b2e468ce4/numpy-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98e81648e0b36e325ab67e46b5400a7a6d4a22b8a7c8e8bbfe20e7db7906bf95", size = 16176394, upload-time = "2025-12-20T16:16:19.524Z" }, + { url = "https://files.pythonhosted.org/packages/61/f0/1687441ece7b47a62e45a1f82015352c240765c707928edd8aef875d5951/numpy-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d57b5046c120561ba8fa8e4030fbb8b822f3063910fa901ffadf16e2b7128ad6", size = 18287378, upload-time = "2025-12-20T16:16:22.866Z" }, + { url = "https://files.pythonhosted.org/packages/d3/6f/f868765d44e6fc466467ed810ba9d8d6db1add7d4a748abfa2a4c99a3194/numpy-2.4.0-cp312-cp312-win32.whl", hash = "sha256:92190db305a6f48734d3982f2c60fa30d6b5ee9bff10f2887b930d7b40119f4c", size = 5955432, upload-time = "2025-12-20T16:16:25.06Z" }, + { url = "https://files.pythonhosted.org/packages/d4/b5/94c1e79fcbab38d1ca15e13777477b2914dd2d559b410f96949d6637b085/numpy-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:680060061adb2d74ce352628cb798cfdec399068aa7f07ba9fb818b2b3305f98", size = 12306201, upload-time = "2025-12-20T16:16:26.979Z" }, + { url = "https://files.pythonhosted.org/packages/70/09/c39dadf0b13bb0768cd29d6a3aaff1fb7c6905ac40e9aaeca26b1c086e06/numpy-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:39699233bc72dd482da1415dcb06076e32f60eddc796a796c5fb6c5efce94667", size = 10308234, upload-time = "2025-12-20T16:16:29.417Z" }, + { url = "https://files.pythonhosted.org/packages/a7/0d/853fd96372eda07c824d24adf02e8bc92bb3731b43a9b2a39161c3667cc4/numpy-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a152d86a3ae00ba5f47b3acf3b827509fd0b6cb7d3259665e63dafbad22a75ea", size = 16649088, upload-time = "2025-12-20T16:16:31.421Z" }, + { url = "https://files.pythonhosted.org/packages/e3/37/cc636f1f2a9f585434e20a3e6e63422f70bfe4f7f6698e941db52ea1ac9a/numpy-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:39b19251dec4de8ff8496cd0806cbe27bf0684f765abb1f4809554de93785f2d", size = 12364065, upload-time = "2025-12-20T16:16:33.491Z" }, + { url = "https://files.pythonhosted.org/packages/ed/69/0b78f37ca3690969beee54103ce5f6021709134e8020767e93ba691a72f1/numpy-2.4.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:009bd0ea12d3c784b6639a8457537016ce5172109e585338e11334f6a7bb88ee", size = 5192640, upload-time = "2025-12-20T16:16:35.636Z" }, + { url = 
"https://files.pythonhosted.org/packages/1d/2a/08569f8252abf590294dbb09a430543ec8f8cc710383abfb3e75cc73aeda/numpy-2.4.0-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:5fe44e277225fd3dff6882d86d3d447205d43532c3627313d17e754fb3905a0e", size = 6541556, upload-time = "2025-12-20T16:16:37.276Z" }, + { url = "https://files.pythonhosted.org/packages/93/e9/a949885a4e177493d61519377952186b6cbfdf1d6002764c664ba28349b5/numpy-2.4.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f935c4493eda9069851058fa0d9e39dbf6286be690066509305e52912714dbb2", size = 14396562, upload-time = "2025-12-20T16:16:38.953Z" }, + { url = "https://files.pythonhosted.org/packages/99/98/9d4ad53b0e9ef901c2ef1d550d2136f5ac42d3fd2988390a6def32e23e48/numpy-2.4.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8cfa5f29a695cb7438965e6c3e8d06e0416060cf0d709c1b1c1653a939bf5c2a", size = 16351719, upload-time = "2025-12-20T16:16:41.503Z" }, + { url = "https://files.pythonhosted.org/packages/28/de/5f3711a38341d6e8dd619f6353251a0cdd07f3d6d101a8fd46f4ef87f895/numpy-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ba0cb30acd3ef11c94dc27fbfba68940652492bc107075e7ffe23057f9425681", size = 16176053, upload-time = "2025-12-20T16:16:44.552Z" }, + { url = "https://files.pythonhosted.org/packages/2a/5b/2a3753dc43916501b4183532e7ace862e13211042bceafa253afb5c71272/numpy-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:60e8c196cd82cbbd4f130b5290007e13e6de3eca79f0d4d38014769d96a7c475", size = 18277859, upload-time = "2025-12-20T16:16:47.174Z" }, + { url = "https://files.pythonhosted.org/packages/2c/c5/a18bcdd07a941db3076ef489d036ab16d2bfc2eae0cf27e5a26e29189434/numpy-2.4.0-cp313-cp313-win32.whl", hash = "sha256:5f48cb3e88fbc294dc90e215d86fbaf1c852c63dbdb6c3a3e63f45c4b57f7344", size = 5953849, upload-time = "2025-12-20T16:16:49.554Z" }, + { url = "https://files.pythonhosted.org/packages/4f/f1/719010ff8061da6e8a26e1980cf090412d4f5f8060b31f0c45d77dd67a01/numpy-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:a899699294f28f7be8992853c0c60741f16ff199205e2e6cdca155762cbaa59d", size = 12302840, upload-time = "2025-12-20T16:16:51.227Z" }, + { url = "https://files.pythonhosted.org/packages/f5/5a/b3d259083ed8b4d335270c76966cb6cf14a5d1b69e1a608994ac57a659e6/numpy-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:9198f447e1dc5647d07c9a6bbe2063cc0132728cc7175b39dbc796da5b54920d", size = 10308509, upload-time = "2025-12-20T16:16:53.313Z" }, + { url = "https://files.pythonhosted.org/packages/31/01/95edcffd1bb6c0633df4e808130545c4f07383ab629ac7e316fb44fff677/numpy-2.4.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74623f2ab5cc3f7c886add4f735d1031a1d2be4a4ae63c0546cfd74e7a31ddf6", size = 12491815, upload-time = "2025-12-20T16:16:55.496Z" }, + { url = "https://files.pythonhosted.org/packages/59/ea/5644b8baa92cc1c7163b4b4458c8679852733fa74ca49c942cfa82ded4e0/numpy-2.4.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:0804a8e4ab070d1d35496e65ffd3cf8114c136a2b81f61dfab0de4b218aacfd5", size = 5320321, upload-time = "2025-12-20T16:16:57.468Z" }, + { url = "https://files.pythonhosted.org/packages/26/4e/e10938106d70bc21319bd6a86ae726da37edc802ce35a3a71ecdf1fdfe7f/numpy-2.4.0-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:02a2038eb27f9443a8b266a66911e926566b5a6ffd1a689b588f7f35b81e7dc3", size = 6641635, upload-time = "2025-12-20T16:16:59.379Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/8d/a8828e3eaf5c0b4ab116924df82f24ce3416fa38d0674d8f708ddc6c8aac/numpy-2.4.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1889b3a3f47a7b5bee16bc25a2145bd7cb91897f815ce3499db64c7458b6d91d", size = 14456053, upload-time = "2025-12-20T16:17:01.768Z" }, + { url = "https://files.pythonhosted.org/packages/68/a1/17d97609d87d4520aa5ae2dcfb32305654550ac6a35effb946d303e594ce/numpy-2.4.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85eef4cb5625c47ee6425c58a3502555e10f45ee973da878ac8248ad58c136f3", size = 16401702, upload-time = "2025-12-20T16:17:04.235Z" }, + { url = "https://files.pythonhosted.org/packages/18/32/0f13c1b2d22bea1118356b8b963195446f3af124ed7a5adfa8fdecb1b6ca/numpy-2.4.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6dc8b7e2f4eb184b37655195f421836cfae6f58197b67e3ffc501f1333d993fa", size = 16242493, upload-time = "2025-12-20T16:17:06.856Z" }, + { url = "https://files.pythonhosted.org/packages/ae/23/48f21e3d309fbc137c068a1475358cbd3a901b3987dcfc97a029ab3068e2/numpy-2.4.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:44aba2f0cafd287871a495fb3163408b0bd25bbce135c6f621534a07f4f7875c", size = 18324222, upload-time = "2025-12-20T16:17:09.392Z" }, + { url = "https://files.pythonhosted.org/packages/ac/52/41f3d71296a3dcaa4f456aaa3c6fc8e745b43d0552b6bde56571bb4b4a0f/numpy-2.4.0-cp313-cp313t-win32.whl", hash = "sha256:20c115517513831860c573996e395707aa9fb691eb179200125c250e895fcd93", size = 6076216, upload-time = "2025-12-20T16:17:11.437Z" }, + { url = "https://files.pythonhosted.org/packages/35/ff/46fbfe60ab0710d2a2b16995f708750307d30eccbb4c38371ea9e986866e/numpy-2.4.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b48e35f4ab6f6a7597c46e301126ceba4c44cd3280e3750f85db48b082624fa4", size = 12444263, upload-time = "2025-12-20T16:17:13.182Z" }, + { url = "https://files.pythonhosted.org/packages/a3/e3/9189ab319c01d2ed556c932ccf55064c5d75bb5850d1df7a482ce0badead/numpy-2.4.0-cp313-cp313t-win_arm64.whl", hash = "sha256:4d1cfce39e511069b11e67cd0bd78ceff31443b7c9e5c04db73c7a19f572967c", size = 10378265, upload-time = "2025-12-20T16:17:15.211Z" }, + { url = "https://files.pythonhosted.org/packages/ab/ed/52eac27de39d5e5a6c9aadabe672bc06f55e24a3d9010cd1183948055d76/numpy-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:c95eb6db2884917d86cde0b4d4cf31adf485c8ec36bf8696dd66fa70de96f36b", size = 16647476, upload-time = "2025-12-20T16:17:17.671Z" }, + { url = "https://files.pythonhosted.org/packages/77/c0/990ce1b7fcd4e09aeaa574e2a0a839589e4b08b2ca68070f1acb1fea6736/numpy-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:65167da969cd1ec3a1df31cb221ca3a19a8aaa25370ecb17d428415e93c1935e", size = 12374563, upload-time = "2025-12-20T16:17:20.216Z" }, + { url = "https://files.pythonhosted.org/packages/37/7c/8c5e389c6ae8f5fd2277a988600d79e9625db3fff011a2d87ac80b881a4c/numpy-2.4.0-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:3de19cfecd1465d0dcf8a5b5ea8b3155b42ed0b639dba4b71e323d74f2a3be5e", size = 5203107, upload-time = "2025-12-20T16:17:22.47Z" }, + { url = "https://files.pythonhosted.org/packages/e6/94/ca5b3bd6a8a70a5eec9a0b8dd7f980c1eff4b8a54970a9a7fef248ef564f/numpy-2.4.0-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:6c05483c3136ac4c91b4e81903cb53a8707d316f488124d0398499a4f8e8ef51", size = 6538067, upload-time = "2025-12-20T16:17:24.001Z" }, + { url = 
"https://files.pythonhosted.org/packages/79/43/993eb7bb5be6761dde2b3a3a594d689cec83398e3f58f4758010f3b85727/numpy-2.4.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36667db4d6c1cea79c8930ab72fadfb4060feb4bfe724141cd4bd064d2e5f8ce", size = 14411926, upload-time = "2025-12-20T16:17:25.822Z" }, + { url = "https://files.pythonhosted.org/packages/03/75/d4c43b61de473912496317a854dac54f1efec3eeb158438da6884b70bb90/numpy-2.4.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9a818668b674047fd88c4cddada7ab8f1c298812783e8328e956b78dc4807f9f", size = 16354295, upload-time = "2025-12-20T16:17:28.308Z" }, + { url = "https://files.pythonhosted.org/packages/b8/0a/b54615b47ee8736a6461a4bb6749128dd3435c5a759d5663f11f0e9af4ac/numpy-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1ee32359fb7543b7b7bd0b2f46294db27e29e7bbdf70541e81b190836cd83ded", size = 16190242, upload-time = "2025-12-20T16:17:30.993Z" }, + { url = "https://files.pythonhosted.org/packages/98/ce/ea207769aacad6246525ec6c6bbd66a2bf56c72443dc10e2f90feed29290/numpy-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e493962256a38f58283de033d8af176c5c91c084ea30f15834f7545451c42059", size = 18280875, upload-time = "2025-12-20T16:17:33.327Z" }, + { url = "https://files.pythonhosted.org/packages/17/ef/ec409437aa962ea372ed601c519a2b141701683ff028f894b7466f0ab42b/numpy-2.4.0-cp314-cp314-win32.whl", hash = "sha256:6bbaebf0d11567fa8926215ae731e1d58e6ec28a8a25235b8a47405d301332db", size = 6002530, upload-time = "2025-12-20T16:17:35.729Z" }, + { url = "https://files.pythonhosted.org/packages/5f/4a/5cb94c787a3ed1ac65e1271b968686521169a7b3ec0b6544bb3ca32960b0/numpy-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:3d857f55e7fdf7c38ab96c4558c95b97d1c685be6b05c249f5fdafcbd6f9899e", size = 12435890, upload-time = "2025-12-20T16:17:37.599Z" }, + { url = "https://files.pythonhosted.org/packages/48/a0/04b89db963af9de1104975e2544f30de89adbf75b9e75f7dd2599be12c79/numpy-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:bb50ce5fb202a26fd5404620e7ef820ad1ab3558b444cb0b55beb7ef66cd2d63", size = 10591892, upload-time = "2025-12-20T16:17:39.649Z" }, + { url = "https://files.pythonhosted.org/packages/53/e5/d74b5ccf6712c06c7a545025a6a71bfa03bdc7e0568b405b0d655232fd92/numpy-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:355354388cba60f2132df297e2d53053d4063f79077b67b481d21276d61fc4df", size = 12494312, upload-time = "2025-12-20T16:17:41.714Z" }, + { url = "https://files.pythonhosted.org/packages/c2/08/3ca9cc2ddf54dfee7ae9a6479c071092a228c68aef08252aa08dac2af002/numpy-2.4.0-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:1d8f9fde5f6dc1b6fc34df8162f3b3079365468703fee7f31d4e0cc8c63baed9", size = 5322862, upload-time = "2025-12-20T16:17:44.145Z" }, + { url = "https://files.pythonhosted.org/packages/87/74/0bb63a68394c0c1e52670cfff2e309afa41edbe11b3327d9af29e4383f34/numpy-2.4.0-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:e0434aa22c821f44eeb4c650b81c7fbdd8c0122c6c4b5a576a76d5a35625ecd9", size = 6644986, upload-time = "2025-12-20T16:17:46.203Z" }, + { url = "https://files.pythonhosted.org/packages/06/8f/9264d9bdbcf8236af2823623fe2f3981d740fc3461e2787e231d97c38c28/numpy-2.4.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:40483b2f2d3ba7aad426443767ff5632ec3156ef09742b96913787d13c336471", size = 14457958, upload-time = "2025-12-20T16:17:48.017Z" }, + { url = 
"https://files.pythonhosted.org/packages/8c/d9/f9a69ae564bbc7236a35aa883319364ef5fd41f72aa320cc1cbe66148fe2/numpy-2.4.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9e6a7664ddd9746e20b7325351fe1a8408d0a2bf9c63b5e898290ddc8f09544", size = 16398394, upload-time = "2025-12-20T16:17:50.409Z" }, + { url = "https://files.pythonhosted.org/packages/34/c7/39241501408dde7f885d241a98caba5421061a2c6d2b2197ac5e3aa842d8/numpy-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ecb0019d44f4cdb50b676c5d0cb4b1eae8e15d1ed3d3e6639f986fc92b2ec52c", size = 16241044, upload-time = "2025-12-20T16:17:52.661Z" }, + { url = "https://files.pythonhosted.org/packages/7c/95/cae7effd90e065a95e59fe710eeee05d7328ed169776dfdd9f789e032125/numpy-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d0ffd9e2e4441c96a9c91ec1783285d80bf835b677853fc2770a89d50c1e48ac", size = 18321772, upload-time = "2025-12-20T16:17:54.947Z" }, + { url = "https://files.pythonhosted.org/packages/96/df/3c6c279accd2bfb968a76298e5b276310bd55d243df4fa8ac5816d79347d/numpy-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:77f0d13fa87036d7553bf81f0e1fe3ce68d14c9976c9851744e4d3e91127e95f", size = 6148320, upload-time = "2025-12-20T16:17:57.249Z" }, + { url = "https://files.pythonhosted.org/packages/92/8d/f23033cce252e7a75cae853d17f582e86534c46404dea1c8ee094a9d6d84/numpy-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b1f5b45829ac1848893f0ddf5cb326110604d6df96cdc255b0bf9edd154104d4", size = 12623460, upload-time = "2025-12-20T16:17:58.963Z" }, + { url = "https://files.pythonhosted.org/packages/a4/4f/1f8475907d1a7c4ef9020edf7f39ea2422ec896849245f00688e4b268a71/numpy-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:23a3e9d1a6f360267e8fbb38ba5db355a6a7e9be71d7fce7ab3125e88bb646c8", size = 10661799, upload-time = "2025-12-20T16:18:01.078Z" }, +] + +[[package]] +name = "opentelemetry-api" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/97/b9/3161be15bb8e3ad01be8be5a968a9237c3027c5be504362ff800fca3e442/opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c", size = 65767, upload-time = "2025-12-11T13:32:39.182Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/df/d3f1ddf4bb4cb50ed9b1139cc7b1c54c34a1e7ce8fd1b9a37c0d1551a6bd/opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950", size = 66356, upload-time = "2025-12-11T13:32:17.304Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-proto" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/9d/22d241b66f7bbde88a3bfa6847a351d2c46b84de23e71222c6aae25c7050/opentelemetry_exporter_otlp_proto_common-1.39.1.tar.gz", hash = "sha256:763370d4737a59741c89a67b50f9e39271639ee4afc999dadfe768541c027464", size = 20409, upload-time = "2025-12-11T13:32:40.885Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8c/02/ffc3e143d89a27ac21fd557365b98bd0653b98de8a101151d5805b5d4c33/opentelemetry_exporter_otlp_proto_common-1.39.1-py3-none-any.whl", hash = "sha256:08f8a5862d64cc3435105686d0216c1365dc5701f86844a8cd56597d0c764fde", size = 18366, upload-time = "2025-12-11T13:32:20.2Z" }, +] + +[[package]] +name = 
"opentelemetry-exporter-otlp-proto-http" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "googleapis-common-protos" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, + { name = "requests" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/80/04/2a08fa9c0214ae38880df01e8bfae12b067ec0793446578575e5080d6545/opentelemetry_exporter_otlp_proto_http-1.39.1.tar.gz", hash = "sha256:31bdab9745c709ce90a49a0624c2bd445d31a28ba34275951a6a362d16a0b9cb", size = 17288, upload-time = "2025-12-11T13:32:42.029Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/f1/b27d3e2e003cd9a3592c43d099d2ed8d0a947c15281bf8463a256db0b46c/opentelemetry_exporter_otlp_proto_http-1.39.1-py3-none-any.whl", hash = "sha256:d9f5207183dd752a412c4cd564ca8875ececba13be6e9c6c370ffb752fd59985", size = 19641, upload-time = "2025-12-11T13:32:22.248Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation" +version = "0.60b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "packaging" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/41/0f/7e6b713ac117c1f5e4e3300748af699b9902a2e5e34c9cf443dde25a01fa/opentelemetry_instrumentation-0.60b1.tar.gz", hash = "sha256:57ddc7974c6eb35865af0426d1a17132b88b2ed8586897fee187fd5b8944bd6a", size = 31706, upload-time = "2025-12-11T13:36:42.515Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/d2/6788e83c5c86a2690101681aeef27eeb2a6bf22df52d3f263a22cee20915/opentelemetry_instrumentation-0.60b1-py3-none-any.whl", hash = "sha256:04480db952b48fb1ed0073f822f0ee26012b7be7c3eac1a3793122737c78632d", size = 33096, upload-time = "2025-12-11T13:35:33.067Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-aiohttp-client" +version = "0.60b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/79/95be90c555fd7efde79dcba36ea5c668815aa2d0a4250b63687e0f91c74a/opentelemetry_instrumentation_aiohttp_client-0.60b1.tar.gz", hash = "sha256:d0e7d5aa057791ca4d9090b0d3c9982f253c1a24b6bc78a734fc18d8dd97927b", size = 15907, upload-time = "2025-12-11T13:36:44.434Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/f4/1a1ec632c86269750ae833c8fbdd4c8d15316eb1c21e3544e34791c805ee/opentelemetry_instrumentation_aiohttp_client-0.60b1-py3-none-any.whl", hash = "sha256:34c5097256a30b16c5a2a88a409ed82b92972a494c43212c85632d204a78c2a1", size = 12694, upload-time = "2025-12-11T13:35:35.034Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-requests" +version = "0.60b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9d/4a/bb9d47d7424fc33aeba75275256ae6e6031f44b6a9a3f778d611c0c3ac27/opentelemetry_instrumentation_requests-0.60b1.tar.gz", hash = 
"sha256:9a1063c16c44a3ba6e81870c4fa42a0fac3ecef5a4d60a11d0976eec9046f3d4", size = 16366, upload-time = "2025-12-11T13:37:12.456Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f2/7f/969b59a5acccb4c35317421843d63d7853ad7a18078ca3a9b80c248be448/opentelemetry_instrumentation_requests-0.60b1-py3-none-any.whl", hash = "sha256:eec9fac3fab84737f663a2e08b12cb095b4bd67643b24587a8ecfa3cf4d0ca4c", size = 13141, upload-time = "2025-12-11T13:36:23.696Z" }, +] + +[[package]] +name = "opentelemetry-proto" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/49/1d/f25d76d8260c156c40c97c9ed4511ec0f9ce353f8108ca6e7561f82a06b2/opentelemetry_proto-1.39.1.tar.gz", hash = "sha256:6c8e05144fc0d3ed4d22c2289c6b126e03bcd0e6a7da0f16cedd2e1c2772e2c8", size = 46152, upload-time = "2025-12-11T13:32:48.681Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/95/b40c96a7b5203005a0b03d8ce8cd212ff23f1793d5ba289c87a097571b18/opentelemetry_proto-1.39.1-py3-none-any.whl", hash = "sha256:22cdc78efd3b3765d09e68bfbd010d4fc254c9818afd0b6b423387d9dee46007", size = 72535, upload-time = "2025-12-11T13:32:33.866Z" }, +] + +[[package]] +name = "opentelemetry-sdk" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/fb/c76080c9ba07e1e8235d24cdcc4d125ef7aa3edf23eb4e497c2e50889adc/opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6", size = 171460, upload-time = "2025-12-11T13:32:49.369Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/98/e91cf858f203d86f4eccdf763dcf01cf03f1dae80c3750f7e635bfa206b6/opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c", size = 132565, upload-time = "2025-12-11T13:32:35.069Z" }, +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.60b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/df/553f93ed38bf22f4b999d9be9c185adb558982214f33eae539d3b5cd0858/opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953", size = 137935, upload-time = "2025-12-11T13:32:50.487Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7a/5e/5958555e09635d09b75de3c4f8b9cae7335ca545d77392ffe7331534c402/opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = "sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb", size = 219982, upload-time = "2025-12-11T13:32:36.955Z" }, +] + +[[package]] +name = "opentelemetry-util-http" +version = "0.60b1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/50/fc/c47bb04a1d8a941a4061307e1eddfa331ed4d0ab13d8a9781e6db256940a/opentelemetry_util_http-0.60b1.tar.gz", hash = "sha256:0d97152ca8c8a41ced7172d29d3622a219317f74ae6bb3027cfbdcf22c3cc0d6", size = 11053, upload-time = "2025-12-11T13:37:25.115Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/16/5c/d3f1733665f7cd582ef0842fb1d2ed0bc1fba10875160593342d22bba375/opentelemetry_util_http-0.60b1-py3-none-any.whl", hash = "sha256:66381ba28550c91bee14dcba8979ace443444af1ed609226634596b4b0faf199", size = 8947, upload-time = "2025-12-11T13:36:37.151Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "paginate" +version = "0.5.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ec/46/68dde5b6bc00c1296ec6466ab27dddede6aec9af1b99090e1107091b3b84/paginate-0.5.7.tar.gz", hash = "sha256:22bd083ab41e1a8b4f3690544afb2c60c25e5c9a63a30fa2f483f6c60c8e5945", size = 19252, upload-time = "2024-08-25T14:17:24.139Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/90/96/04b8e52da071d28f5e21a805b19cb9390aa17a47462ac87f5e2696b9566d/paginate-0.5.7-py2.py3-none-any.whl", hash = "sha256:b885e2af73abcf01d9559fd5216b57ef722f8c42affbb63942377668e35c7591", size = 13746, upload-time = "2024-08-25T14:17:22.55Z" }, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, +] + +[[package]] +name = "pillow" +version = "12.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/cace85a1b0c9775a9f8f5d5423c8261c858760e2466c79b2dd184638b056/pillow-12.0.0.tar.gz", hash = "sha256:87d4f8125c9988bfbed67af47dd7a953e2fc7b0cc1e7800ec6d2080d490bb353", size = 47008828, upload-time = "2025-10-15T18:24:14.008Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/90/4fcce2c22caf044e660a198d740e7fbc14395619e3cb1abad12192c0826c/pillow-12.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:53561a4ddc36facb432fae7a9d8afbfaf94795414f5cdc5fc52f28c1dca90371", size = 5249377, upload-time = "2025-10-15T18:22:05.993Z" }, + { url = "https://files.pythonhosted.org/packages/fd/e0/ed960067543d080691d47d6938ebccbf3976a931c9567ab2fbfab983a5dd/pillow-12.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:71db6b4c1653045dacc1585c1b0d184004f0d7e694c7b34ac165ca70c0838082", size = 4650343, upload-time = "2025-10-15T18:22:07.718Z" }, + { url = 
"https://files.pythonhosted.org/packages/e7/a1/f81fdeddcb99c044bf7d6faa47e12850f13cee0849537a7d27eeab5534d4/pillow-12.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2fa5f0b6716fc88f11380b88b31fe591a06c6315e955c096c35715788b339e3f", size = 6232981, upload-time = "2025-10-15T18:22:09.287Z" }, + { url = "https://files.pythonhosted.org/packages/88/e1/9098d3ce341a8750b55b0e00c03f1630d6178f38ac191c81c97a3b047b44/pillow-12.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:82240051c6ca513c616f7f9da06e871f61bfd7805f566275841af15015b8f98d", size = 8041399, upload-time = "2025-10-15T18:22:10.872Z" }, + { url = "https://files.pythonhosted.org/packages/a7/62/a22e8d3b602ae8cc01446d0c57a54e982737f44b6f2e1e019a925143771d/pillow-12.0.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:55f818bd74fe2f11d4d7cbc65880a843c4075e0ac7226bc1a23261dbea531953", size = 6347740, upload-time = "2025-10-15T18:22:12.769Z" }, + { url = "https://files.pythonhosted.org/packages/4f/87/424511bdcd02c8d7acf9f65caa09f291a519b16bd83c3fb3374b3d4ae951/pillow-12.0.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b87843e225e74576437fd5b6a4c2205d422754f84a06942cfaf1dc32243e45a8", size = 7040201, upload-time = "2025-10-15T18:22:14.813Z" }, + { url = "https://files.pythonhosted.org/packages/dc/4d/435c8ac688c54d11755aedfdd9f29c9eeddf68d150fe42d1d3dbd2365149/pillow-12.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c607c90ba67533e1b2355b821fef6764d1dd2cbe26b8c1005ae84f7aea25ff79", size = 6462334, upload-time = "2025-10-15T18:22:16.375Z" }, + { url = "https://files.pythonhosted.org/packages/2b/f2/ad34167a8059a59b8ad10bc5c72d4d9b35acc6b7c0877af8ac885b5f2044/pillow-12.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:21f241bdd5080a15bc86d3466a9f6074a9c2c2b314100dd896ac81ee6db2f1ba", size = 7134162, upload-time = "2025-10-15T18:22:17.996Z" }, + { url = "https://files.pythonhosted.org/packages/0c/b1/a7391df6adacf0a5c2cf6ac1cf1fcc1369e7d439d28f637a847f8803beb3/pillow-12.0.0-cp312-cp312-win32.whl", hash = "sha256:dd333073e0cacdc3089525c7df7d39b211bcdf31fc2824e49d01c6b6187b07d0", size = 6298769, upload-time = "2025-10-15T18:22:19.923Z" }, + { url = "https://files.pythonhosted.org/packages/a2/0b/d87733741526541c909bbf159e338dcace4f982daac6e5a8d6be225ca32d/pillow-12.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9fe611163f6303d1619bbcb653540a4d60f9e55e622d60a3108be0d5b441017a", size = 7001107, upload-time = "2025-10-15T18:22:21.644Z" }, + { url = "https://files.pythonhosted.org/packages/bc/96/aaa61ce33cc98421fb6088af2a03be4157b1e7e0e87087c888e2370a7f45/pillow-12.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:7dfb439562f234f7d57b1ac6bc8fe7f838a4bd49c79230e0f6a1da93e82f1fad", size = 2436012, upload-time = "2025-10-15T18:22:23.621Z" }, + { url = "https://files.pythonhosted.org/packages/62/f2/de993bb2d21b33a98d031ecf6a978e4b61da207bef02f7b43093774c480d/pillow-12.0.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:0869154a2d0546545cde61d1789a6524319fc1897d9ee31218eae7a60ccc5643", size = 4045493, upload-time = "2025-10-15T18:22:25.758Z" }, + { url = "https://files.pythonhosted.org/packages/0e/b6/bc8d0c4c9f6f111a783d045310945deb769b806d7574764234ffd50bc5ea/pillow-12.0.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:a7921c5a6d31b3d756ec980f2f47c0cfdbce0fc48c22a39347a895f41f4a6ea4", size = 4120461, upload-time = "2025-10-15T18:22:27.286Z" }, + { url = 
"https://files.pythonhosted.org/packages/5d/57/d60d343709366a353dc56adb4ee1e7d8a2cc34e3fbc22905f4167cfec119/pillow-12.0.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:1ee80a59f6ce048ae13cda1abf7fbd2a34ab9ee7d401c46be3ca685d1999a399", size = 3576912, upload-time = "2025-10-15T18:22:28.751Z" }, + { url = "https://files.pythonhosted.org/packages/a4/a4/a0a31467e3f83b94d37568294b01d22b43ae3c5d85f2811769b9c66389dd/pillow-12.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c50f36a62a22d350c96e49ad02d0da41dbd17ddc2e29750dbdba4323f85eb4a5", size = 5249132, upload-time = "2025-10-15T18:22:30.641Z" }, + { url = "https://files.pythonhosted.org/packages/83/06/48eab21dd561de2914242711434c0c0eb992ed08ff3f6107a5f44527f5e9/pillow-12.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5193fde9a5f23c331ea26d0cf171fbf67e3f247585f50c08b3e205c7aeb4589b", size = 4650099, upload-time = "2025-10-15T18:22:32.73Z" }, + { url = "https://files.pythonhosted.org/packages/fc/bd/69ed99fd46a8dba7c1887156d3572fe4484e3f031405fcc5a92e31c04035/pillow-12.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bde737cff1a975b70652b62d626f7785e0480918dece11e8fef3c0cf057351c3", size = 6230808, upload-time = "2025-10-15T18:22:34.337Z" }, + { url = "https://files.pythonhosted.org/packages/ea/94/8fad659bcdbf86ed70099cb60ae40be6acca434bbc8c4c0d4ef356d7e0de/pillow-12.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a6597ff2b61d121172f5844b53f21467f7082f5fb385a9a29c01414463f93b07", size = 8037804, upload-time = "2025-10-15T18:22:36.402Z" }, + { url = "https://files.pythonhosted.org/packages/20/39/c685d05c06deecfd4e2d1950e9a908aa2ca8bc4e6c3b12d93b9cafbd7837/pillow-12.0.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b817e7035ea7f6b942c13aa03bb554fc44fea70838ea21f8eb31c638326584e", size = 6345553, upload-time = "2025-10-15T18:22:38.066Z" }, + { url = "https://files.pythonhosted.org/packages/38/57/755dbd06530a27a5ed74f8cb0a7a44a21722ebf318edbe67ddbd7fb28f88/pillow-12.0.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f4f1231b7dec408e8670264ce63e9c71409d9583dd21d32c163e25213ee2a344", size = 7037729, upload-time = "2025-10-15T18:22:39.769Z" }, + { url = "https://files.pythonhosted.org/packages/ca/b6/7e94f4c41d238615674d06ed677c14883103dce1c52e4af16f000338cfd7/pillow-12.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e51b71417049ad6ab14c49608b4a24d8fb3fe605e5dfabfe523b58064dc3d27", size = 6459789, upload-time = "2025-10-15T18:22:41.437Z" }, + { url = "https://files.pythonhosted.org/packages/9c/14/4448bb0b5e0f22dd865290536d20ec8a23b64e2d04280b89139f09a36bb6/pillow-12.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d120c38a42c234dc9a8c5de7ceaaf899cf33561956acb4941653f8bdc657aa79", size = 7130917, upload-time = "2025-10-15T18:22:43.152Z" }, + { url = "https://files.pythonhosted.org/packages/dd/ca/16c6926cc1c015845745d5c16c9358e24282f1e588237a4c36d2b30f182f/pillow-12.0.0-cp313-cp313-win32.whl", hash = "sha256:4cc6b3b2efff105c6a1656cfe59da4fdde2cda9af1c5e0b58529b24525d0a098", size = 6302391, upload-time = "2025-10-15T18:22:44.753Z" }, + { url = "https://files.pythonhosted.org/packages/6d/2a/dd43dcfd6dae9b6a49ee28a8eedb98c7d5ff2de94a5d834565164667b97b/pillow-12.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:4cf7fed4b4580601c4345ceb5d4cbf5a980d030fd5ad07c4d2ec589f95f09905", size = 7007477, upload-time = "2025-10-15T18:22:46.838Z" }, + { url = 
"https://files.pythonhosted.org/packages/77/f0/72ea067f4b5ae5ead653053212af05ce3705807906ba3f3e8f58ddf617e6/pillow-12.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:9f0b04c6b8584c2c193babcccc908b38ed29524b29dd464bc8801bf10d746a3a", size = 2435918, upload-time = "2025-10-15T18:22:48.399Z" }, + { url = "https://files.pythonhosted.org/packages/f5/5e/9046b423735c21f0487ea6cb5b10f89ea8f8dfbe32576fe052b5ba9d4e5b/pillow-12.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7fa22993bac7b77b78cae22bad1e2a987ddf0d9015c63358032f84a53f23cdc3", size = 5251406, upload-time = "2025-10-15T18:22:49.905Z" }, + { url = "https://files.pythonhosted.org/packages/12/66/982ceebcdb13c97270ef7a56c3969635b4ee7cd45227fa707c94719229c5/pillow-12.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f135c702ac42262573fe9714dfe99c944b4ba307af5eb507abef1667e2cbbced", size = 4653218, upload-time = "2025-10-15T18:22:51.587Z" }, + { url = "https://files.pythonhosted.org/packages/16/b3/81e625524688c31859450119bf12674619429cab3119eec0e30a7a1029cb/pillow-12.0.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c85de1136429c524e55cfa4e033b4a7940ac5c8ee4d9401cc2d1bf48154bbc7b", size = 6266564, upload-time = "2025-10-15T18:22:53.215Z" }, + { url = "https://files.pythonhosted.org/packages/98/59/dfb38f2a41240d2408096e1a76c671d0a105a4a8471b1871c6902719450c/pillow-12.0.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:38df9b4bfd3db902c9c2bd369bcacaf9d935b2fff73709429d95cc41554f7b3d", size = 8069260, upload-time = "2025-10-15T18:22:54.933Z" }, + { url = "https://files.pythonhosted.org/packages/dc/3d/378dbea5cd1874b94c312425ca77b0f47776c78e0df2df751b820c8c1d6c/pillow-12.0.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7d87ef5795da03d742bf49439f9ca4d027cde49c82c5371ba52464aee266699a", size = 6379248, upload-time = "2025-10-15T18:22:56.605Z" }, + { url = "https://files.pythonhosted.org/packages/84/b0/d525ef47d71590f1621510327acec75ae58c721dc071b17d8d652ca494d8/pillow-12.0.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aff9e4d82d082ff9513bdd6acd4f5bd359f5b2c870907d2b0a9c5e10d40c88fe", size = 7066043, upload-time = "2025-10-15T18:22:58.53Z" }, + { url = "https://files.pythonhosted.org/packages/61/2c/aced60e9cf9d0cde341d54bf7932c9ffc33ddb4a1595798b3a5150c7ec4e/pillow-12.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8d8ca2b210ada074d57fcee40c30446c9562e542fc46aedc19baf758a93532ee", size = 6490915, upload-time = "2025-10-15T18:23:00.582Z" }, + { url = "https://files.pythonhosted.org/packages/ef/26/69dcb9b91f4e59f8f34b2332a4a0a951b44f547c4ed39d3e4dcfcff48f89/pillow-12.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:99a7f72fb6249302aa62245680754862a44179b545ded638cf1fef59befb57ef", size = 7157998, upload-time = "2025-10-15T18:23:02.627Z" }, + { url = "https://files.pythonhosted.org/packages/61/2b/726235842220ca95fa441ddf55dd2382b52ab5b8d9c0596fe6b3f23dafe8/pillow-12.0.0-cp313-cp313t-win32.whl", hash = "sha256:4078242472387600b2ce8d93ade8899c12bf33fa89e55ec89fe126e9d6d5d9e9", size = 6306201, upload-time = "2025-10-15T18:23:04.709Z" }, + { url = "https://files.pythonhosted.org/packages/c0/3d/2afaf4e840b2df71344ababf2f8edd75a705ce500e5dc1e7227808312ae1/pillow-12.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2c54c1a783d6d60595d3514f0efe9b37c8808746a66920315bfd34a938d7994b", size = 7013165, upload-time = "2025-10-15T18:23:06.46Z" }, + { url = 
"https://files.pythonhosted.org/packages/6f/75/3fa09aa5cf6ed04bee3fa575798ddf1ce0bace8edb47249c798077a81f7f/pillow-12.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:26d9f7d2b604cd23aba3e9faf795787456ac25634d82cd060556998e39c6fa47", size = 2437834, upload-time = "2025-10-15T18:23:08.194Z" }, + { url = "https://files.pythonhosted.org/packages/54/2a/9a8c6ba2c2c07b71bec92cf63e03370ca5e5f5c5b119b742bcc0cde3f9c5/pillow-12.0.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:beeae3f27f62308f1ddbcfb0690bf44b10732f2ef43758f169d5e9303165d3f9", size = 4045531, upload-time = "2025-10-15T18:23:10.121Z" }, + { url = "https://files.pythonhosted.org/packages/84/54/836fdbf1bfb3d66a59f0189ff0b9f5f666cee09c6188309300df04ad71fa/pillow-12.0.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:d4827615da15cd59784ce39d3388275ec093ae3ee8d7f0c089b76fa87af756c2", size = 4120554, upload-time = "2025-10-15T18:23:12.14Z" }, + { url = "https://files.pythonhosted.org/packages/0d/cd/16aec9f0da4793e98e6b54778a5fbce4f375c6646fe662e80600b8797379/pillow-12.0.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:3e42edad50b6909089750e65c91aa09aaf1e0a71310d383f11321b27c224ed8a", size = 3576812, upload-time = "2025-10-15T18:23:13.962Z" }, + { url = "https://files.pythonhosted.org/packages/f6/b7/13957fda356dc46339298b351cae0d327704986337c3c69bb54628c88155/pillow-12.0.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:e5d8efac84c9afcb40914ab49ba063d94f5dbdf5066db4482c66a992f47a3a3b", size = 5252689, upload-time = "2025-10-15T18:23:15.562Z" }, + { url = "https://files.pythonhosted.org/packages/fc/f5/eae31a306341d8f331f43edb2e9122c7661b975433de5e447939ae61c5da/pillow-12.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:266cd5f2b63ff316d5a1bba46268e603c9caf5606d44f38c2873c380950576ad", size = 4650186, upload-time = "2025-10-15T18:23:17.379Z" }, + { url = "https://files.pythonhosted.org/packages/86/62/2a88339aa40c4c77e79108facbd307d6091e2c0eb5b8d3cf4977cfca2fe6/pillow-12.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:58eea5ebe51504057dd95c5b77d21700b77615ab0243d8152793dc00eb4faf01", size = 6230308, upload-time = "2025-10-15T18:23:18.971Z" }, + { url = "https://files.pythonhosted.org/packages/c7/33/5425a8992bcb32d1cb9fa3dd39a89e613d09a22f2c8083b7bf43c455f760/pillow-12.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f13711b1a5ba512d647a0e4ba79280d3a9a045aaf7e0cc6fbe96b91d4cdf6b0c", size = 8039222, upload-time = "2025-10-15T18:23:20.909Z" }, + { url = "https://files.pythonhosted.org/packages/d8/61/3f5d3b35c5728f37953d3eec5b5f3e77111949523bd2dd7f31a851e50690/pillow-12.0.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6846bd2d116ff42cba6b646edf5bf61d37e5cbd256425fa089fee4ff5c07a99e", size = 6346657, upload-time = "2025-10-15T18:23:23.077Z" }, + { url = "https://files.pythonhosted.org/packages/3a/be/ee90a3d79271227e0f0a33c453531efd6ed14b2e708596ba5dd9be948da3/pillow-12.0.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c98fa880d695de164b4135a52fd2e9cd7b7c90a9d8ac5e9e443a24a95ef9248e", size = 7038482, upload-time = "2025-10-15T18:23:25.005Z" }, + { url = "https://files.pythonhosted.org/packages/44/34/a16b6a4d1ad727de390e9bd9f19f5f669e079e5826ec0f329010ddea492f/pillow-12.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fa3ed2a29a9e9d2d488b4da81dcb54720ac3104a20bf0bd273f1e4648aff5af9", size = 6461416, upload-time = "2025-10-15T18:23:27.009Z" }, + { url = 
"https://files.pythonhosted.org/packages/b6/39/1aa5850d2ade7d7ba9f54e4e4c17077244ff7a2d9e25998c38a29749eb3f/pillow-12.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d034140032870024e6b9892c692fe2968493790dd57208b2c37e3fb35f6df3ab", size = 7131584, upload-time = "2025-10-15T18:23:29.752Z" }, + { url = "https://files.pythonhosted.org/packages/bf/db/4fae862f8fad0167073a7733973bfa955f47e2cac3dc3e3e6257d10fab4a/pillow-12.0.0-cp314-cp314-win32.whl", hash = "sha256:1b1b133e6e16105f524a8dec491e0586d072948ce15c9b914e41cdadd209052b", size = 6400621, upload-time = "2025-10-15T18:23:32.06Z" }, + { url = "https://files.pythonhosted.org/packages/2b/24/b350c31543fb0107ab2599464d7e28e6f856027aadda995022e695313d94/pillow-12.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:8dc232e39d409036af549c86f24aed8273a40ffa459981146829a324e0848b4b", size = 7142916, upload-time = "2025-10-15T18:23:34.71Z" }, + { url = "https://files.pythonhosted.org/packages/0f/9b/0ba5a6fd9351793996ef7487c4fdbde8d3f5f75dbedc093bb598648fddf0/pillow-12.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:d52610d51e265a51518692045e372a4c363056130d922a7351429ac9f27e70b0", size = 2523836, upload-time = "2025-10-15T18:23:36.967Z" }, + { url = "https://files.pythonhosted.org/packages/f5/7a/ceee0840aebc579af529b523d530840338ecf63992395842e54edc805987/pillow-12.0.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1979f4566bb96c1e50a62d9831e2ea2d1211761e5662afc545fa766f996632f6", size = 5255092, upload-time = "2025-10-15T18:23:38.573Z" }, + { url = "https://files.pythonhosted.org/packages/44/76/20776057b4bfd1aef4eeca992ebde0f53a4dce874f3ae693d0ec90a4f79b/pillow-12.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b2e4b27a6e15b04832fe9bf292b94b5ca156016bbc1ea9c2c20098a0320d6cf6", size = 4653158, upload-time = "2025-10-15T18:23:40.238Z" }, + { url = "https://files.pythonhosted.org/packages/82/3f/d9ff92ace07be8836b4e7e87e6a4c7a8318d47c2f1463ffcf121fc57d9cb/pillow-12.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fb3096c30df99fd01c7bf8e544f392103d0795b9f98ba71a8054bcbf56b255f1", size = 6267882, upload-time = "2025-10-15T18:23:42.434Z" }, + { url = "https://files.pythonhosted.org/packages/9f/7a/4f7ff87f00d3ad33ba21af78bfcd2f032107710baf8280e3722ceec28cda/pillow-12.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7438839e9e053ef79f7112c881cef684013855016f928b168b81ed5835f3e75e", size = 8071001, upload-time = "2025-10-15T18:23:44.29Z" }, + { url = "https://files.pythonhosted.org/packages/75/87/fcea108944a52dad8cca0715ae6247e271eb80459364a98518f1e4f480c1/pillow-12.0.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d5c411a8eaa2299322b647cd932586b1427367fd3184ffbb8f7a219ea2041ca", size = 6380146, upload-time = "2025-10-15T18:23:46.065Z" }, + { url = "https://files.pythonhosted.org/packages/91/52/0d31b5e571ef5fd111d2978b84603fce26aba1b6092f28e941cb46570745/pillow-12.0.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d7e091d464ac59d2c7ad8e7e08105eaf9dafbc3883fd7265ffccc2baad6ac925", size = 7067344, upload-time = "2025-10-15T18:23:47.898Z" }, + { url = "https://files.pythonhosted.org/packages/7b/f4/2dd3d721f875f928d48e83bb30a434dee75a2531bca839bb996bb0aa5a91/pillow-12.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:792a2c0be4dcc18af9d4a2dfd8a11a17d5e25274a1062b0ec1c2d79c76f3e7f8", size = 6491864, upload-time = "2025-10-15T18:23:49.607Z" }, + { url = 
"https://files.pythonhosted.org/packages/30/4b/667dfcf3d61fc309ba5a15b141845cece5915e39b99c1ceab0f34bf1d124/pillow-12.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:afbefa430092f71a9593a99ab6a4e7538bc9eabbf7bf94f91510d3503943edc4", size = 7158911, upload-time = "2025-10-15T18:23:51.351Z" }, + { url = "https://files.pythonhosted.org/packages/a2/2f/16cabcc6426c32218ace36bf0d55955e813f2958afddbf1d391849fee9d1/pillow-12.0.0-cp314-cp314t-win32.whl", hash = "sha256:3830c769decf88f1289680a59d4f4c46c72573446352e2befec9a8512104fa52", size = 6408045, upload-time = "2025-10-15T18:23:53.177Z" }, + { url = "https://files.pythonhosted.org/packages/35/73/e29aa0c9c666cf787628d3f0dcf379f4791fba79f4936d02f8b37165bdf8/pillow-12.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:905b0365b210c73afb0ebe9101a32572152dfd1c144c7e28968a331b9217b94a", size = 7148282, upload-time = "2025-10-15T18:23:55.316Z" }, + { url = "https://files.pythonhosted.org/packages/c1/70/6b41bdcddf541b437bbb9f47f94d2db5d9ddef6c37ccab8c9107743748a4/pillow-12.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:99353a06902c2e43b43e8ff74ee65a7d90307d82370604746738a1e0661ccca7", size = 2525630, upload-time = "2025-10-15T18:23:57.149Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "pre-commit" +version = "4.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cfgv" }, + { name = "identify" }, + { name = "nodeenv" }, + { name = "pyyaml" }, + { name = "virtualenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2e/c8/e22c292035f1bac8b9f5237a2622305bc0304e776080b246f3df57c4ff9f/pre_commit-4.0.1.tar.gz", hash = "sha256:80905ac375958c0444c65e9cebebd948b3cdb518f335a091a670a89d652139d2", size = 191678, upload-time = "2024-10-08T16:09:37.641Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/16/8f/496e10d51edd6671ebe0432e33ff800aa86775d2d147ce7d43389324a525/pre_commit-4.0.1-py2.py3-none-any.whl", hash = "sha256:efde913840816312445dc98787724647c65473daefe420785f885e8ed9a06878", size = 218713, upload-time = "2024-10-08T16:09:35.726Z" }, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.52" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/96/06e01a7b38dce6fe1db213e061a4602dd6032a8a97ef6c1a862537732421/prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855", size = 434198, upload-time = "2025-08-27T15:24:02.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/03/0d3ce49e2505ae70cf43bc5bb3033955d2fc9f932163e84dc0779cc47f48/prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955", size = 391431, upload-time = "2025-08-27T15:23:59.498Z" }, +] + +[[package]] +name = "propcache" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9e/da/e9fc233cf63743258bff22b3dfa7ea5baef7b5bc324af47a0ad89b8ffc6f/propcache-0.4.1.tar.gz", hash = "sha256:f48107a8c637e80362555f37ecf49abe20370e557cc4ab374f04ec4423c97c3d", size = 46442, upload-time = "2025-10-08T19:49:02.291Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/0f/f17b1b2b221d5ca28b4b876e8bb046ac40466513960646bda8e1853cdfa2/propcache-0.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e153e9cd40cc8945138822807139367f256f89c6810c2634a4f6902b52d3b4e2", size = 80061, upload-time = "2025-10-08T19:46:46.075Z" }, + { url = "https://files.pythonhosted.org/packages/76/47/8ccf75935f51448ba9a16a71b783eb7ef6b9ee60f5d14c7f8a8a79fbeed7/propcache-0.4.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cd547953428f7abb73c5ad82cbb32109566204260d98e41e5dfdc682eb7f8403", size = 46037, upload-time = "2025-10-08T19:46:47.23Z" }, + { url = "https://files.pythonhosted.org/packages/0a/b6/5c9a0e42df4d00bfb4a3cbbe5cf9f54260300c88a0e9af1f47ca5ce17ac0/propcache-0.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f048da1b4f243fc44f205dfd320933a951b8d89e0afd4c7cacc762a8b9165207", size = 47324, upload-time = "2025-10-08T19:46:48.384Z" }, + { url = "https://files.pythonhosted.org/packages/9e/d3/6c7ee328b39a81ee877c962469f1e795f9db87f925251efeb0545e0020d0/propcache-0.4.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ec17c65562a827bba85e3872ead335f95405ea1674860d96483a02f5c698fa72", size = 225505, upload-time = "2025-10-08T19:46:50.055Z" }, + { url = "https://files.pythonhosted.org/packages/01/5d/1c53f4563490b1d06a684742cc6076ef944bc6457df6051b7d1a877c057b/propcache-0.4.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:405aac25c6394ef275dee4c709be43745d36674b223ba4eb7144bf4d691b7367", size = 230242, upload-time = "2025-10-08T19:46:51.815Z" }, + { url = "https://files.pythonhosted.org/packages/20/e1/ce4620633b0e2422207c3cb774a0ee61cac13abc6217763a7b9e2e3f4a12/propcache-0.4.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0013cb6f8dde4b2a2f66903b8ba740bdfe378c943c4377a200551ceb27f379e4", size = 238474, upload-time = "2025-10-08T19:46:53.208Z" }, + { url = "https://files.pythonhosted.org/packages/46/4b/3aae6835b8e5f44ea6a68348ad90f78134047b503765087be2f9912140ea/propcache-0.4.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:15932ab57837c3368b024473a525e25d316d8353016e7cc0e5ba9eb343fbb1cf", size = 221575, upload-time = "2025-10-08T19:46:54.511Z" }, + { url = 
"https://files.pythonhosted.org/packages/6e/a5/8a5e8678bcc9d3a1a15b9a29165640d64762d424a16af543f00629c87338/propcache-0.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:031dce78b9dc099f4c29785d9cf5577a3faf9ebf74ecbd3c856a7b92768c3df3", size = 216736, upload-time = "2025-10-08T19:46:56.212Z" }, + { url = "https://files.pythonhosted.org/packages/f1/63/b7b215eddeac83ca1c6b934f89d09a625aa9ee4ba158338854c87210cc36/propcache-0.4.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ab08df6c9a035bee56e31af99be621526bd237bea9f32def431c656b29e41778", size = 213019, upload-time = "2025-10-08T19:46:57.595Z" }, + { url = "https://files.pythonhosted.org/packages/57/74/f580099a58c8af587cac7ba19ee7cb418506342fbbe2d4a4401661cca886/propcache-0.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4d7af63f9f93fe593afbf104c21b3b15868efb2c21d07d8732c0c4287e66b6a6", size = 220376, upload-time = "2025-10-08T19:46:59.067Z" }, + { url = "https://files.pythonhosted.org/packages/c4/ee/542f1313aff7eaf19c2bb758c5d0560d2683dac001a1c96d0774af799843/propcache-0.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cfc27c945f422e8b5071b6e93169679e4eb5bf73bbcbf1ba3ae3a83d2f78ebd9", size = 226988, upload-time = "2025-10-08T19:47:00.544Z" }, + { url = "https://files.pythonhosted.org/packages/8f/18/9c6b015dd9c6930f6ce2229e1f02fb35298b847f2087ea2b436a5bfa7287/propcache-0.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:35c3277624a080cc6ec6f847cbbbb5b49affa3598c4535a0a4682a697aaa5c75", size = 215615, upload-time = "2025-10-08T19:47:01.968Z" }, + { url = "https://files.pythonhosted.org/packages/80/9e/e7b85720b98c45a45e1fca6a177024934dc9bc5f4d5dd04207f216fc33ed/propcache-0.4.1-cp312-cp312-win32.whl", hash = "sha256:671538c2262dadb5ba6395e26c1731e1d52534bfe9ae56d0b5573ce539266aa8", size = 38066, upload-time = "2025-10-08T19:47:03.503Z" }, + { url = "https://files.pythonhosted.org/packages/54/09/d19cff2a5aaac632ec8fc03737b223597b1e347416934c1b3a7df079784c/propcache-0.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:cb2d222e72399fcf5890d1d5cc1060857b9b236adff2792ff48ca2dfd46c81db", size = 41655, upload-time = "2025-10-08T19:47:04.973Z" }, + { url = "https://files.pythonhosted.org/packages/68/ab/6b5c191bb5de08036a8c697b265d4ca76148efb10fa162f14af14fb5f076/propcache-0.4.1-cp312-cp312-win_arm64.whl", hash = "sha256:204483131fb222bdaaeeea9f9e6c6ed0cac32731f75dfc1d4a567fc1926477c1", size = 37789, upload-time = "2025-10-08T19:47:06.077Z" }, + { url = "https://files.pythonhosted.org/packages/bf/df/6d9c1b6ac12b003837dde8a10231a7344512186e87b36e855bef32241942/propcache-0.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:43eedf29202c08550aac1d14e0ee619b0430aaef78f85864c1a892294fbc28cf", size = 77750, upload-time = "2025-10-08T19:47:07.648Z" }, + { url = "https://files.pythonhosted.org/packages/8b/e8/677a0025e8a2acf07d3418a2e7ba529c9c33caf09d3c1f25513023c1db56/propcache-0.4.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d62cdfcfd89ccb8de04e0eda998535c406bf5e060ffd56be6c586cbcc05b3311", size = 44780, upload-time = "2025-10-08T19:47:08.851Z" }, + { url = "https://files.pythonhosted.org/packages/89/a4/92380f7ca60f99ebae761936bc48a72a639e8a47b29050615eef757cb2a7/propcache-0.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cae65ad55793da34db5f54e4029b89d3b9b9490d8abe1b4c7ab5d4b8ec7ebf74", size = 46308, upload-time = "2025-10-08T19:47:09.982Z" }, + { url = 
"https://files.pythonhosted.org/packages/2d/48/c5ac64dee5262044348d1d78a5f85dd1a57464a60d30daee946699963eb3/propcache-0.4.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:333ddb9031d2704a301ee3e506dc46b1fe5f294ec198ed6435ad5b6a085facfe", size = 208182, upload-time = "2025-10-08T19:47:11.319Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0c/cd762dd011a9287389a6a3eb43aa30207bde253610cca06824aeabfe9653/propcache-0.4.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:fd0858c20f078a32cf55f7e81473d96dcf3b93fd2ccdb3d40fdf54b8573df3af", size = 211215, upload-time = "2025-10-08T19:47:13.146Z" }, + { url = "https://files.pythonhosted.org/packages/30/3e/49861e90233ba36890ae0ca4c660e95df565b2cd15d4a68556ab5865974e/propcache-0.4.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:678ae89ebc632c5c204c794f8dab2837c5f159aeb59e6ed0539500400577298c", size = 218112, upload-time = "2025-10-08T19:47:14.913Z" }, + { url = "https://files.pythonhosted.org/packages/f1/8b/544bc867e24e1bd48f3118cecd3b05c694e160a168478fa28770f22fd094/propcache-0.4.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d472aeb4fbf9865e0c6d622d7f4d54a4e101a89715d8904282bb5f9a2f476c3f", size = 204442, upload-time = "2025-10-08T19:47:16.277Z" }, + { url = "https://files.pythonhosted.org/packages/50/a6/4282772fd016a76d3e5c0df58380a5ea64900afd836cec2c2f662d1b9bb3/propcache-0.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4d3df5fa7e36b3225954fba85589da77a0fe6a53e3976de39caf04a0db4c36f1", size = 199398, upload-time = "2025-10-08T19:47:17.962Z" }, + { url = "https://files.pythonhosted.org/packages/3e/ec/d8a7cd406ee1ddb705db2139f8a10a8a427100347bd698e7014351c7af09/propcache-0.4.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:ee17f18d2498f2673e432faaa71698032b0127ebf23ae5974eeaf806c279df24", size = 196920, upload-time = "2025-10-08T19:47:19.355Z" }, + { url = "https://files.pythonhosted.org/packages/f6/6c/f38ab64af3764f431e359f8baf9e0a21013e24329e8b85d2da32e8ed07ca/propcache-0.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:580e97762b950f993ae618e167e7be9256b8353c2dcd8b99ec100eb50f5286aa", size = 203748, upload-time = "2025-10-08T19:47:21.338Z" }, + { url = "https://files.pythonhosted.org/packages/d6/e3/fa846bd70f6534d647886621388f0a265254d30e3ce47e5c8e6e27dbf153/propcache-0.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:501d20b891688eb8e7aa903021f0b72d5a55db40ffaab27edefd1027caaafa61", size = 205877, upload-time = "2025-10-08T19:47:23.059Z" }, + { url = "https://files.pythonhosted.org/packages/e2/39/8163fc6f3133fea7b5f2827e8eba2029a0277ab2c5beee6c1db7b10fc23d/propcache-0.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a0bd56e5b100aef69bd8562b74b46254e7c8812918d3baa700c8a8009b0af66", size = 199437, upload-time = "2025-10-08T19:47:24.445Z" }, + { url = "https://files.pythonhosted.org/packages/93/89/caa9089970ca49c7c01662bd0eeedfe85494e863e8043565aeb6472ce8fe/propcache-0.4.1-cp313-cp313-win32.whl", hash = "sha256:bcc9aaa5d80322bc2fb24bb7accb4a30f81e90ab8d6ba187aec0744bc302ad81", size = 37586, upload-time = "2025-10-08T19:47:25.736Z" }, + { url = "https://files.pythonhosted.org/packages/f5/ab/f76ec3c3627c883215b5c8080debb4394ef5a7a29be811f786415fc1e6fd/propcache-0.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:381914df18634f5494334d201e98245c0596067504b9372d8cf93f4bb23e025e", size = 40790, 
upload-time = "2025-10-08T19:47:26.847Z" }, + { url = "https://files.pythonhosted.org/packages/59/1b/e71ae98235f8e2ba5004d8cb19765a74877abf189bc53fc0c80d799e56c3/propcache-0.4.1-cp313-cp313-win_arm64.whl", hash = "sha256:8873eb4460fd55333ea49b7d189749ecf6e55bf85080f11b1c4530ed3034cba1", size = 37158, upload-time = "2025-10-08T19:47:27.961Z" }, + { url = "https://files.pythonhosted.org/packages/83/ce/a31bbdfc24ee0dcbba458c8175ed26089cf109a55bbe7b7640ed2470cfe9/propcache-0.4.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:92d1935ee1f8d7442da9c0c4fa7ac20d07e94064184811b685f5c4fada64553b", size = 81451, upload-time = "2025-10-08T19:47:29.445Z" }, + { url = "https://files.pythonhosted.org/packages/25/9c/442a45a470a68456e710d96cacd3573ef26a1d0a60067e6a7d5e655621ed/propcache-0.4.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:473c61b39e1460d386479b9b2f337da492042447c9b685f28be4f74d3529e566", size = 46374, upload-time = "2025-10-08T19:47:30.579Z" }, + { url = "https://files.pythonhosted.org/packages/f4/bf/b1d5e21dbc3b2e889ea4327044fb16312a736d97640fb8b6aa3f9c7b3b65/propcache-0.4.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c0ef0aaafc66fbd87842a3fe3902fd889825646bc21149eafe47be6072725835", size = 48396, upload-time = "2025-10-08T19:47:31.79Z" }, + { url = "https://files.pythonhosted.org/packages/f4/04/5b4c54a103d480e978d3c8a76073502b18db0c4bc17ab91b3cb5092ad949/propcache-0.4.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95393b4d66bfae908c3ca8d169d5f79cd65636ae15b5e7a4f6e67af675adb0e", size = 275950, upload-time = "2025-10-08T19:47:33.481Z" }, + { url = "https://files.pythonhosted.org/packages/b4/c1/86f846827fb969c4b78b0af79bba1d1ea2156492e1b83dea8b8a6ae27395/propcache-0.4.1-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c07fda85708bc48578467e85099645167a955ba093be0a2dcba962195676e859", size = 273856, upload-time = "2025-10-08T19:47:34.906Z" }, + { url = "https://files.pythonhosted.org/packages/36/1d/fc272a63c8d3bbad6878c336c7a7dea15e8f2d23a544bda43205dfa83ada/propcache-0.4.1-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:af223b406d6d000830c6f65f1e6431783fc3f713ba3e6cc8c024d5ee96170a4b", size = 280420, upload-time = "2025-10-08T19:47:36.338Z" }, + { url = "https://files.pythonhosted.org/packages/07/0c/01f2219d39f7e53d52e5173bcb09c976609ba30209912a0680adfb8c593a/propcache-0.4.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a78372c932c90ee474559c5ddfffd718238e8673c340dc21fe45c5b8b54559a0", size = 263254, upload-time = "2025-10-08T19:47:37.692Z" }, + { url = "https://files.pythonhosted.org/packages/2d/18/cd28081658ce597898f0c4d174d4d0f3c5b6d4dc27ffafeef835c95eb359/propcache-0.4.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:564d9f0d4d9509e1a870c920a89b2fec951b44bf5ba7d537a9e7c1ccec2c18af", size = 261205, upload-time = "2025-10-08T19:47:39.659Z" }, + { url = "https://files.pythonhosted.org/packages/7a/71/1f9e22eb8b8316701c2a19fa1f388c8a3185082607da8e406a803c9b954e/propcache-0.4.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:17612831fda0138059cc5546f4d12a2aacfb9e47068c06af35c400ba58ba7393", size = 247873, upload-time = "2025-10-08T19:47:41.084Z" }, + { url = "https://files.pythonhosted.org/packages/4a/65/3d4b61f36af2b4eddba9def857959f1016a51066b4f1ce348e0cf7881f58/propcache-0.4.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = 
"sha256:41a89040cb10bd345b3c1a873b2bf36413d48da1def52f268a055f7398514874", size = 262739, upload-time = "2025-10-08T19:47:42.51Z" }, + { url = "https://files.pythonhosted.org/packages/2a/42/26746ab087faa77c1c68079b228810436ccd9a5ce9ac85e2b7307195fd06/propcache-0.4.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e35b88984e7fa64aacecea39236cee32dd9bd8c55f57ba8a75cf2399553f9bd7", size = 263514, upload-time = "2025-10-08T19:47:43.927Z" }, + { url = "https://files.pythonhosted.org/packages/94/13/630690fe201f5502d2403dd3cfd451ed8858fe3c738ee88d095ad2ff407b/propcache-0.4.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f8b465489f927b0df505cbe26ffbeed4d6d8a2bbc61ce90eb074ff129ef0ab1", size = 257781, upload-time = "2025-10-08T19:47:45.448Z" }, + { url = "https://files.pythonhosted.org/packages/92/f7/1d4ec5841505f423469efbfc381d64b7b467438cd5a4bbcbb063f3b73d27/propcache-0.4.1-cp313-cp313t-win32.whl", hash = "sha256:2ad890caa1d928c7c2965b48f3a3815c853180831d0e5503d35cf00c472f4717", size = 41396, upload-time = "2025-10-08T19:47:47.202Z" }, + { url = "https://files.pythonhosted.org/packages/48/f0/615c30622316496d2cbbc29f5985f7777d3ada70f23370608c1d3e081c1f/propcache-0.4.1-cp313-cp313t-win_amd64.whl", hash = "sha256:f7ee0e597f495cf415bcbd3da3caa3bd7e816b74d0d52b8145954c5e6fd3ff37", size = 44897, upload-time = "2025-10-08T19:47:48.336Z" }, + { url = "https://files.pythonhosted.org/packages/fd/ca/6002e46eccbe0e33dcd4069ef32f7f1c9e243736e07adca37ae8c4830ec3/propcache-0.4.1-cp313-cp313t-win_arm64.whl", hash = "sha256:929d7cbe1f01bb7baffb33dc14eb5691c95831450a26354cd210a8155170c93a", size = 39789, upload-time = "2025-10-08T19:47:49.876Z" }, + { url = "https://files.pythonhosted.org/packages/8e/5c/bca52d654a896f831b8256683457ceddd490ec18d9ec50e97dfd8fc726a8/propcache-0.4.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3f7124c9d820ba5548d431afb4632301acf965db49e666aa21c305cbe8c6de12", size = 78152, upload-time = "2025-10-08T19:47:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/65/9b/03b04e7d82a5f54fb16113d839f5ea1ede58a61e90edf515f6577c66fa8f/propcache-0.4.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:c0d4b719b7da33599dfe3b22d3db1ef789210a0597bc650b7cee9c77c2be8c5c", size = 44869, upload-time = "2025-10-08T19:47:52.594Z" }, + { url = "https://files.pythonhosted.org/packages/b2/fa/89a8ef0468d5833a23fff277b143d0573897cf75bd56670a6d28126c7d68/propcache-0.4.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:9f302f4783709a78240ebc311b793f123328716a60911d667e0c036bc5dcbded", size = 46596, upload-time = "2025-10-08T19:47:54.073Z" }, + { url = "https://files.pythonhosted.org/packages/86/bd/47816020d337f4a746edc42fe8d53669965138f39ee117414c7d7a340cfe/propcache-0.4.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c80ee5802e3fb9ea37938e7eecc307fb984837091d5fd262bb37238b1ae97641", size = 206981, upload-time = "2025-10-08T19:47:55.715Z" }, + { url = "https://files.pythonhosted.org/packages/df/f6/c5fa1357cc9748510ee55f37173eb31bfde6d94e98ccd9e6f033f2fc06e1/propcache-0.4.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ed5a841e8bb29a55fb8159ed526b26adc5bdd7e8bd7bf793ce647cb08656cdf4", size = 211490, upload-time = "2025-10-08T19:47:57.499Z" }, + { url = "https://files.pythonhosted.org/packages/80/1e/e5889652a7c4a3846683401a48f0f2e5083ce0ec1a8a5221d8058fbd1adf/propcache-0.4.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:55c72fd6ea2da4c318e74ffdf93c4fe4e926051133657459131a95c846d16d44", size = 215371, upload-time = "2025-10-08T19:47:59.317Z" }, + { url = "https://files.pythonhosted.org/packages/b2/f2/889ad4b2408f72fe1a4f6a19491177b30ea7bf1a0fd5f17050ca08cfc882/propcache-0.4.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8326e144341460402713f91df60ade3c999d601e7eb5ff8f6f7862d54de0610d", size = 201424, upload-time = "2025-10-08T19:48:00.67Z" }, + { url = "https://files.pythonhosted.org/packages/27/73/033d63069b57b0812c8bd19f311faebeceb6ba31b8f32b73432d12a0b826/propcache-0.4.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:060b16ae65bc098da7f6d25bf359f1f31f688384858204fe5d652979e0015e5b", size = 197566, upload-time = "2025-10-08T19:48:02.604Z" }, + { url = "https://files.pythonhosted.org/packages/dc/89/ce24f3dc182630b4e07aa6d15f0ff4b14ed4b9955fae95a0b54c58d66c05/propcache-0.4.1-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:89eb3fa9524f7bec9de6e83cf3faed9d79bffa560672c118a96a171a6f55831e", size = 193130, upload-time = "2025-10-08T19:48:04.499Z" }, + { url = "https://files.pythonhosted.org/packages/a9/24/ef0d5fd1a811fb5c609278d0209c9f10c35f20581fcc16f818da959fc5b4/propcache-0.4.1-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:dee69d7015dc235f526fe80a9c90d65eb0039103fe565776250881731f06349f", size = 202625, upload-time = "2025-10-08T19:48:06.213Z" }, + { url = "https://files.pythonhosted.org/packages/f5/02/98ec20ff5546f68d673df2f7a69e8c0d076b5abd05ca882dc7ee3a83653d/propcache-0.4.1-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5558992a00dfd54ccbc64a32726a3357ec93825a418a401f5cc67df0ac5d9e49", size = 204209, upload-time = "2025-10-08T19:48:08.432Z" }, + { url = "https://files.pythonhosted.org/packages/a0/87/492694f76759b15f0467a2a93ab68d32859672b646aa8a04ce4864e7932d/propcache-0.4.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c9b822a577f560fbd9554812526831712c1436d2c046cedee4c3796d3543b144", size = 197797, upload-time = "2025-10-08T19:48:09.968Z" }, + { url = "https://files.pythonhosted.org/packages/ee/36/66367de3575db1d2d3f3d177432bd14ee577a39d3f5d1b3d5df8afe3b6e2/propcache-0.4.1-cp314-cp314-win32.whl", hash = "sha256:ab4c29b49d560fe48b696cdcb127dd36e0bc2472548f3bf56cc5cb3da2b2984f", size = 38140, upload-time = "2025-10-08T19:48:11.232Z" }, + { url = "https://files.pythonhosted.org/packages/0c/2a/a758b47de253636e1b8aef181c0b4f4f204bf0dd964914fb2af90a95b49b/propcache-0.4.1-cp314-cp314-win_amd64.whl", hash = "sha256:5a103c3eb905fcea0ab98be99c3a9a5ab2de60228aa5aceedc614c0281cf6153", size = 41257, upload-time = "2025-10-08T19:48:12.707Z" }, + { url = "https://files.pythonhosted.org/packages/34/5e/63bd5896c3fec12edcbd6f12508d4890d23c265df28c74b175e1ef9f4f3b/propcache-0.4.1-cp314-cp314-win_arm64.whl", hash = "sha256:74c1fb26515153e482e00177a1ad654721bf9207da8a494a0c05e797ad27b992", size = 38097, upload-time = "2025-10-08T19:48:13.923Z" }, + { url = "https://files.pythonhosted.org/packages/99/85/9ff785d787ccf9bbb3f3106f79884a130951436f58392000231b4c737c80/propcache-0.4.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:824e908bce90fb2743bd6b59db36eb4f45cd350a39637c9f73b1c1ea66f5b75f", size = 81455, upload-time = "2025-10-08T19:48:15.16Z" }, + { url = "https://files.pythonhosted.org/packages/90/85/2431c10c8e7ddb1445c1f7c4b54d886e8ad20e3c6307e7218f05922cad67/propcache-0.4.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c2b5e7db5328427c57c8e8831abda175421b709672f6cfc3d630c3b7e2146393", size = 46372, 
upload-time = "2025-10-08T19:48:16.424Z" }, + { url = "https://files.pythonhosted.org/packages/01/20/b0972d902472da9bcb683fa595099911f4d2e86e5683bcc45de60dd05dc3/propcache-0.4.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6f6ff873ed40292cd4969ef5310179afd5db59fdf055897e282485043fc80ad0", size = 48411, upload-time = "2025-10-08T19:48:17.577Z" }, + { url = "https://files.pythonhosted.org/packages/e2/e3/7dc89f4f21e8f99bad3d5ddb3a3389afcf9da4ac69e3deb2dcdc96e74169/propcache-0.4.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:49a2dc67c154db2c1463013594c458881a069fcf98940e61a0569016a583020a", size = 275712, upload-time = "2025-10-08T19:48:18.901Z" }, + { url = "https://files.pythonhosted.org/packages/20/67/89800c8352489b21a8047c773067644e3897f02ecbbd610f4d46b7f08612/propcache-0.4.1-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:005f08e6a0529984491e37d8dbc3dd86f84bd78a8ceb5fa9a021f4c48d4984be", size = 273557, upload-time = "2025-10-08T19:48:20.762Z" }, + { url = "https://files.pythonhosted.org/packages/e2/a1/b52b055c766a54ce6d9c16d9aca0cad8059acd9637cdf8aa0222f4a026ef/propcache-0.4.1-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5c3310452e0d31390da9035c348633b43d7e7feb2e37be252be6da45abd1abcc", size = 280015, upload-time = "2025-10-08T19:48:22.592Z" }, + { url = "https://files.pythonhosted.org/packages/48/c8/33cee30bd890672c63743049f3c9e4be087e6780906bfc3ec58528be59c1/propcache-0.4.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c3c70630930447f9ef1caac7728c8ad1c56bc5015338b20fed0d08ea2480b3a", size = 262880, upload-time = "2025-10-08T19:48:23.947Z" }, + { url = "https://files.pythonhosted.org/packages/0c/b1/8f08a143b204b418285c88b83d00edbd61afbc2c6415ffafc8905da7038b/propcache-0.4.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8e57061305815dfc910a3634dcf584f08168a8836e6999983569f51a8544cd89", size = 260938, upload-time = "2025-10-08T19:48:25.656Z" }, + { url = "https://files.pythonhosted.org/packages/cf/12/96e4664c82ca2f31e1c8dff86afb867348979eb78d3cb8546a680287a1e9/propcache-0.4.1-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:521a463429ef54143092c11a77e04056dd00636f72e8c45b70aaa3140d639726", size = 247641, upload-time = "2025-10-08T19:48:27.207Z" }, + { url = "https://files.pythonhosted.org/packages/18/ed/e7a9cfca28133386ba52278136d42209d3125db08d0a6395f0cba0c0285c/propcache-0.4.1-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:120c964da3fdc75e3731aa392527136d4ad35868cc556fd09bb6d09172d9a367", size = 262510, upload-time = "2025-10-08T19:48:28.65Z" }, + { url = "https://files.pythonhosted.org/packages/f5/76/16d8bf65e8845dd62b4e2b57444ab81f07f40caa5652b8969b87ddcf2ef6/propcache-0.4.1-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:d8f353eb14ee3441ee844ade4277d560cdd68288838673273b978e3d6d2c8f36", size = 263161, upload-time = "2025-10-08T19:48:30.133Z" }, + { url = "https://files.pythonhosted.org/packages/e7/70/c99e9edb5d91d5ad8a49fa3c1e8285ba64f1476782fed10ab251ff413ba1/propcache-0.4.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ab2943be7c652f09638800905ee1bab2c544e537edb57d527997a24c13dc1455", size = 257393, upload-time = "2025-10-08T19:48:31.567Z" }, + { url = "https://files.pythonhosted.org/packages/08/02/87b25304249a35c0915d236575bc3574a323f60b47939a2262b77632a3ee/propcache-0.4.1-cp314-cp314t-win32.whl", hash = 
"sha256:05674a162469f31358c30bcaa8883cb7829fa3110bf9c0991fe27d7896c42d85", size = 42546, upload-time = "2025-10-08T19:48:32.872Z" }, + { url = "https://files.pythonhosted.org/packages/cb/ef/3c6ecf8b317aa982f309835e8f96987466123c6e596646d4e6a1dfcd080f/propcache-0.4.1-cp314-cp314t-win_amd64.whl", hash = "sha256:990f6b3e2a27d683cb7602ed6c86f15ee6b43b1194736f9baaeb93d0016633b1", size = 46259, upload-time = "2025-10-08T19:48:34.226Z" }, + { url = "https://files.pythonhosted.org/packages/c4/2d/346e946d4951f37eca1e4f55be0f0174c52cd70720f84029b02f296f4a38/propcache-0.4.1-cp314-cp314t-win_arm64.whl", hash = "sha256:ecef2343af4cc68e05131e45024ba34f6095821988a9d0a02aa7c73fcc448aa9", size = 40428, upload-time = "2025-10-08T19:48:35.441Z" }, + { url = "https://files.pythonhosted.org/packages/5b/5a/bc7b4a4ef808fa59a816c17b20c4bef6884daebbdf627ff2a161da67da19/propcache-0.4.1-py3-none-any.whl", hash = "sha256:af2a6052aeb6cf17d3e46ee169099044fd8224cbaf75c76a2ef596e8163e2237", size = 13305, upload-time = "2025-10-08T19:49:00.792Z" }, +] + +[[package]] +name = "protobuf" +version = "6.33.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/34/44/e49ecff446afeec9d1a66d6bbf9adc21e3c7cea7803a920ca3773379d4f6/protobuf-6.33.2.tar.gz", hash = "sha256:56dc370c91fbb8ac85bc13582c9e373569668a290aa2e66a590c2a0d35ddb9e4", size = 444296, upload-time = "2025-12-06T00:17:53.311Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/91/1e3a34881a88697a7354ffd177e8746e97a722e5e8db101544b47e84afb1/protobuf-6.33.2-cp310-abi3-win32.whl", hash = "sha256:87eb388bd2d0f78febd8f4c8779c79247b26a5befad525008e49a6955787ff3d", size = 425603, upload-time = "2025-12-06T00:17:41.114Z" }, + { url = "https://files.pythonhosted.org/packages/64/20/4d50191997e917ae13ad0a235c8b42d8c1ab9c3e6fd455ca16d416944355/protobuf-6.33.2-cp310-abi3-win_amd64.whl", hash = "sha256:fc2a0e8b05b180e5fc0dd1559fe8ebdae21a27e81ac77728fb6c42b12c7419b4", size = 436930, upload-time = "2025-12-06T00:17:43.278Z" }, + { url = "https://files.pythonhosted.org/packages/b2/ca/7e485da88ba45c920fb3f50ae78de29ab925d9e54ef0de678306abfbb497/protobuf-6.33.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d9b19771ca75935b3a4422957bc518b0cecb978b31d1dd12037b088f6bcc0e43", size = 427621, upload-time = "2025-12-06T00:17:44.445Z" }, + { url = "https://files.pythonhosted.org/packages/7d/4f/f743761e41d3b2b2566748eb76bbff2b43e14d5fcab694f494a16458b05f/protobuf-6.33.2-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:b5d3b5625192214066d99b2b605f5783483575656784de223f00a8d00754fc0e", size = 324460, upload-time = "2025-12-06T00:17:45.678Z" }, + { url = "https://files.pythonhosted.org/packages/b1/fa/26468d00a92824020f6f2090d827078c09c9c587e34cbfd2d0c7911221f8/protobuf-6.33.2-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:8cd7640aee0b7828b6d03ae518b5b4806fdfc1afe8de82f79c3454f8aef29872", size = 339168, upload-time = "2025-12-06T00:17:46.813Z" }, + { url = "https://files.pythonhosted.org/packages/56/13/333b8f421738f149d4fe5e49553bc2a2ab75235486259f689b4b91f96cec/protobuf-6.33.2-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:1f8017c48c07ec5859106533b682260ba3d7c5567b1ca1f24297ce03384d1b4f", size = 323270, upload-time = "2025-12-06T00:17:48.253Z" }, + { url = "https://files.pythonhosted.org/packages/0e/15/4f02896cc3df04fc465010a4c6a0cd89810f54617a32a70ef531ed75d61c/protobuf-6.33.2-py3-none-any.whl", hash = "sha256:7636aad9bb01768870266de5dc009de2d1b936771b38a793f73cbbf279c91c5c", size = 170501, upload-time 
= "2025-12-06T00:17:52.211Z" }, +] + +[[package]] +name = "pycares" +version = "4.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8d/ad/9d1e96486d2eb5a2672c4d9a2dd372d015b8d7a332c6ac2722c4c8e6bbbf/pycares-4.11.0.tar.gz", hash = "sha256:c863d9003ca0ce7df26429007859afd2a621d3276ed9fef154a9123db9252557", size = 654473, upload-time = "2025-09-09T15:18:21.849Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/4e/4821b66feefaaa8ec03494c1a11614c430983572e54ff062b4589441e199/pycares-4.11.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b93d624560ba52287873bacff70b42c99943821ecbc810b959b0953560f53c36", size = 145906, upload-time = "2025-09-09T15:16:53.204Z" }, + { url = "https://files.pythonhosted.org/packages/e8/81/93a505dcbb7533254b0ce1da519591dcda889d6a66dcdfa5737e3280e18a/pycares-4.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:775d99966e28c8abd9910ddef2de0f1e173afc5a11cea9f184613c747373ab80", size = 141972, upload-time = "2025-09-09T15:16:54.43Z" }, + { url = "https://files.pythonhosted.org/packages/7d/d6/76994c8b21316e48ea6c3ce3298574c28f90c9c41428a3349a57104621c9/pycares-4.11.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:84fde689557361764f052850a2d68916050adbfd9321f6105aca1d8f1a9bd49b", size = 637832, upload-time = "2025-09-09T15:16:55.523Z" }, + { url = "https://files.pythonhosted.org/packages/bb/a4/5ca7e316d0edb714d78974cb34f4883f63fe9f580644c2db99fb62b05f56/pycares-4.11.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:30ceed06f3bf5eff865a34d21562c25a7f3dad0ed336b9dd415330e03a6c50c4", size = 687751, upload-time = "2025-09-09T15:16:57.55Z" }, + { url = "https://files.pythonhosted.org/packages/cb/8d/c5c578fdd335d7b1dcaea88fae3497390095b5b05a1ba34a29f62d037abb/pycares-4.11.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:97d971b3a88a803bb95ff8a40ea4d68da59319eb8b59e924e318e2560af8c16d", size = 678362, upload-time = "2025-09-09T15:16:58.859Z" }, + { url = "https://files.pythonhosted.org/packages/b9/96/9be4d838a9348dd2e72a90c34d186b918b66d499af5be79afa18a6ba2808/pycares-4.11.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:2d5cac829da91ade70ce1af97dad448c6cd4778b48facbce1b015e16ced93642", size = 641069, upload-time = "2025-09-09T15:17:00.046Z" }, + { url = "https://files.pythonhosted.org/packages/39/d6/8ea9b5dcef6b566cde034aa2b68743f7b0a19fa0fba9ea01a4f98b8a57fb/pycares-4.11.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee1ea367835eb441d246164c09d1f9703197af4425fc6865cefcde9e2ca81f85", size = 622357, upload-time = "2025-09-09T15:17:01.205Z" }, + { url = "https://files.pythonhosted.org/packages/07/f8/3401e89b5d2970e30e02f9beb29ad59e2a8f19ef2c68c978de2b764cacb0/pycares-4.11.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:3139ec1f4450a4b253386035c5ecd2722582ae3320a456df5021ffe3f174260a", size = 670290, upload-time = "2025-09-09T15:17:02.413Z" }, + { url = "https://files.pythonhosted.org/packages/a2/c4/ff6a166e1d1d1987339548a19d0b1d52ec3ead8b3a8a2247a0d96e56013c/pycares-4.11.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5d70324ca1d82c6c4b00aa678347f7560d1ef2ce1d181978903459a97751543a", size = 652958, upload-time = "2025-09-09T15:17:04.203Z" }, + { url = "https://files.pythonhosted.org/packages/b8/7c/fc084b395921c9b862d31a83f809fe649c24314b51b527ad0ab0df33edd4/pycares-4.11.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e2f8d9cfe0eb3a2997fde5df99b1aaea5a46dabfcfcac97b2d05f027c2cd5e28", 
size = 629239, upload-time = "2025-09-09T15:17:05.477Z" }, + { url = "https://files.pythonhosted.org/packages/b0/7f/2f26062bea95ab657f979217d50df563dc9fd9cc4c5dd21a6e7323e9efe7/pycares-4.11.0-cp312-cp312-win32.whl", hash = "sha256:1571a7055c03a95d5270c914034eac7f8bfa1b432fc1de53d871b821752191a4", size = 118918, upload-time = "2025-09-09T15:17:06.882Z" }, + { url = "https://files.pythonhosted.org/packages/a5/86/277473d20f3df4e00fa7e0ebb21955b2830b15247462aaf8f3fc8c4950be/pycares-4.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:7570e0b50db619b2ee370461c462617225dc3a3f63f975c6f117e2f0c94f82ca", size = 144560, upload-time = "2025-09-09T15:17:07.891Z" }, + { url = "https://files.pythonhosted.org/packages/f0/f9/d65ad17ec921d8b7eb42161dec2024ee2f5c9f1c44cabf0dd1b7f4fac6c5/pycares-4.11.0-cp312-cp312-win_arm64.whl", hash = "sha256:f199702740f3b766ed8c70efb885538be76cb48cd0cb596b948626f0b825e07a", size = 115695, upload-time = "2025-09-09T15:17:09.333Z" }, + { url = "https://files.pythonhosted.org/packages/dc/a9/62fea7ad72ac1fed2ac9dd8e9a7379b7eb0288bf2b3ea5731642c3a6f7de/pycares-4.11.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2c296ab94d1974f8d2f76c499755a9ce31ffd4986e8898ef19b90e32525f7d84", size = 145909, upload-time = "2025-09-09T15:17:10.491Z" }, + { url = "https://files.pythonhosted.org/packages/f4/ac/0317d6d0d3bd7599c53b8f1db09ad04260647d2f6842018e322584791fd5/pycares-4.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e0fcd3a8bac57a0987d9b09953ba0f8703eb9dca7c77f7051d8c2ed001185be8", size = 141974, upload-time = "2025-09-09T15:17:11.634Z" }, + { url = "https://files.pythonhosted.org/packages/63/11/731b565ae1e81c43dac247a248ee204628186f6df97c9927bd06c62237f8/pycares-4.11.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:bac55842047567ddae177fb8189b89a60633ac956d5d37260f7f71b517fd8b87", size = 637796, upload-time = "2025-09-09T15:17:12.815Z" }, + { url = "https://files.pythonhosted.org/packages/f5/30/a2631fe2ffaa85475cdbff7df1d9376bc0b2a6ae77ca55d53233c937a5da/pycares-4.11.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:4da2e805ed8c789b9444ef4053f6ef8040cd13b0c1ca6d3c4fe6f9369c458cb4", size = 687734, upload-time = "2025-09-09T15:17:14.015Z" }, + { url = "https://files.pythonhosted.org/packages/a9/b7/b3a5f99d4ab776662e71d5a56e8f6ea10741230ff988d1f502a8d429236b/pycares-4.11.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:ea785d1f232b42b325578f0c8a2fa348192e182cc84a1e862896076a4a2ba2a7", size = 678320, upload-time = "2025-09-09T15:17:15.442Z" }, + { url = "https://files.pythonhosted.org/packages/ea/77/a00d962b90432993afbf3bd05da8fe42117e0d9037cd7fd428dc41094d7b/pycares-4.11.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:aa160dc9e785212c49c12bb891e242c949758b99542946cc8e2098ef391f93b0", size = 641012, upload-time = "2025-09-09T15:17:16.728Z" }, + { url = "https://files.pythonhosted.org/packages/c6/fb/9266979ba59d37deee1fd74452b2ae32a7395acafe1bee510ac023c6c9a5/pycares-4.11.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7830709c23bbc43fbaefbb3dde57bdd295dc86732504b9d2e65044df8fd5e9fb", size = 622363, upload-time = "2025-09-09T15:17:17.835Z" }, + { url = "https://files.pythonhosted.org/packages/91/c2/16dbc3dc33781a3c79cbdd76dd1cda808d98ba078d9a63a725d6a1fad181/pycares-4.11.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3ef1ab7abbd238bb2dbbe871c3ea39f5a7fc63547c015820c1e24d0d494a1689", size = 670294, upload-time = "2025-09-09T15:17:19.214Z" }, + { url = 
"https://files.pythonhosted.org/packages/ff/75/f003905e55298a6dd5e0673a2dc11e31518a5141393b925dc05fcaba9fb4/pycares-4.11.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:a4060d8556c908660512d42df1f4a874e4e91b81f79e3a9090afedc7690ea5ba", size = 652973, upload-time = "2025-09-09T15:17:20.388Z" }, + { url = "https://files.pythonhosted.org/packages/55/2a/eafb235c371979e11f8998d686cbaa91df6a84a34ffe4d997dfe57c45445/pycares-4.11.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a98fac4a3d4f780817016b6f00a8a2c2f41df5d25dfa8e5b1aa0d783645a6566", size = 629235, upload-time = "2025-09-09T15:17:21.92Z" }, + { url = "https://files.pythonhosted.org/packages/05/99/60f19eb1c8eb898882dd8875ea51ad0aac3aff5780b27247969e637cc26a/pycares-4.11.0-cp313-cp313-win32.whl", hash = "sha256:faa8321bc2a366189dcf87b3823e030edf5ac97a6b9a7fc99f1926c4bf8ef28e", size = 118918, upload-time = "2025-09-09T15:17:23.327Z" }, + { url = "https://files.pythonhosted.org/packages/2a/14/bc89ad7225cba73068688397de09d7cad657d67b93641c14e5e18b88e685/pycares-4.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:6f74b1d944a50fa12c5006fd10b45e1a45da0c5d15570919ce48be88e428264c", size = 144556, upload-time = "2025-09-09T15:17:24.341Z" }, + { url = "https://files.pythonhosted.org/packages/af/88/4309576bd74b5e6fc1f39b9bc5e4b578df2cadb16bdc026ac0cc15663763/pycares-4.11.0-cp313-cp313-win_arm64.whl", hash = "sha256:4b6f7581793d8bb3014028b8397f6f80b99db8842da58f4409839c29b16397ad", size = 115692, upload-time = "2025-09-09T15:17:25.637Z" }, + { url = "https://files.pythonhosted.org/packages/2a/70/a723bc79bdcac60361b40184b649282ac0ab433b90e9cc0975370c2ff9c9/pycares-4.11.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:df0a17f4e677d57bca3624752bbb515316522ad1ce0de07ed9d920e6c4ee5d35", size = 145910, upload-time = "2025-09-09T15:17:26.774Z" }, + { url = "https://files.pythonhosted.org/packages/d5/4e/46311ef5a384b5f0bb206851135dde8f86b3def38fdbee9e3c03475d35ae/pycares-4.11.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3b44e54cad31d3c3be5e8149ac36bc1c163ec86e0664293402f6f846fb22ad00", size = 142053, upload-time = "2025-09-09T15:17:27.956Z" }, + { url = "https://files.pythonhosted.org/packages/74/23/d236fc4f134d6311e4ad6445571e8285e84a3e155be36422ff20c0fbe471/pycares-4.11.0-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:80752133442dc7e6dd9410cec227c49f69283c038c316a8585cca05ec32c2766", size = 637878, upload-time = "2025-09-09T15:17:29.173Z" }, + { url = "https://files.pythonhosted.org/packages/f7/92/6edd41282b3f0e3d9defaba7b05c39730d51c37c165d9d3b319349c975aa/pycares-4.11.0-cp314-cp314-manylinux_2_28_ppc64le.whl", hash = "sha256:84b0b402dd333403fdce0e204aef1ef834d839c439c0c1aa143dc7d1237bb197", size = 687865, upload-time = "2025-09-09T15:17:30.549Z" }, + { url = "https://files.pythonhosted.org/packages/a7/a9/4d7cf4d72600fd47d9518f9ce99703a3e8711fb08d2ef63d198056cdc9a9/pycares-4.11.0-cp314-cp314-manylinux_2_28_s390x.whl", hash = "sha256:c0eec184df42fc82e43197e073f9cc8f93b25ad2f11f230c64c2dc1c80dbc078", size = 678396, upload-time = "2025-09-09T15:17:32.304Z" }, + { url = "https://files.pythonhosted.org/packages/0b/4b/e546eeb1d8ff6559e2e3bef31a6ea0c6e57ec826191941f83a3ce900ca89/pycares-4.11.0-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:ee751409322ff10709ee867d5aea1dc8431eec7f34835f0f67afd016178da134", size = 640786, upload-time = "2025-09-09T15:17:33.602Z" }, + { url = 
"https://files.pythonhosted.org/packages/0e/f5/b4572d9ee9c26de1f8d1dc80730df756276b9243a6794fa3101bbe56613d/pycares-4.11.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1732db81e348bfce19c9bf9448ba660aea03042eeeea282824da1604a5bd4dcf", size = 621857, upload-time = "2025-09-09T15:17:34.74Z" }, + { url = "https://files.pythonhosted.org/packages/17/f2/639090376198bcaeff86562b25e1bce05a481cfb1e605f82ce62285230cd/pycares-4.11.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:702d21823996f139874aba5aa9bb786d69e93bde6e3915b99832eb4e335d31ae", size = 670130, upload-time = "2025-09-09T15:17:35.982Z" }, + { url = "https://files.pythonhosted.org/packages/3a/c4/cf40773cd9c36a12cebbe1e9b6fb120f9160dc9bfe0398d81a20b6c69972/pycares-4.11.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:218619b912cef7c64a339ab0e231daea10c994a05699740714dff8c428b9694a", size = 653133, upload-time = "2025-09-09T15:17:37.179Z" }, + { url = "https://files.pythonhosted.org/packages/32/6b/06054d977b0a9643821043b59f523f3db5e7684c4b1b4f5821994d5fa780/pycares-4.11.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:719f7ddff024fdacde97b926b4b26d0cc25901d5ef68bb994a581c420069936d", size = 629344, upload-time = "2025-09-09T15:17:38.308Z" }, + { url = "https://files.pythonhosted.org/packages/d6/6f/14bb0c2171a286d512e3f02d6168e608ffe5f6eceab78bf63e3073091ae3/pycares-4.11.0-cp314-cp314-win32.whl", hash = "sha256:d552fb2cb513ce910d1dc22dbba6420758a991a356f3cd1b7ec73a9e31f94d01", size = 121804, upload-time = "2025-09-09T15:17:39.388Z" }, + { url = "https://files.pythonhosted.org/packages/24/dc/6822f9ad6941027f70e1cf161d8631456531a87061588ed3b1dcad07d49d/pycares-4.11.0-cp314-cp314-win_amd64.whl", hash = "sha256:23d50a0842e8dbdddf870a7218a7ab5053b68892706b3a391ecb3d657424d266", size = 148005, upload-time = "2025-09-09T15:17:40.44Z" }, + { url = "https://files.pythonhosted.org/packages/ea/24/24ff3a80aa8471fbb62785c821a8e90f397ca842e0489f83ebf7ee274397/pycares-4.11.0-cp314-cp314-win_arm64.whl", hash = "sha256:836725754c32363d2c5d15b931b3ebd46b20185c02e850672cb6c5f0452c1e80", size = 119239, upload-time = "2025-09-09T15:17:42.094Z" }, + { url = "https://files.pythonhosted.org/packages/54/fe/2f3558d298ff8db31d5c83369001ab72af3b86a0374d9b0d40dc63314187/pycares-4.11.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c9d839b5700542b27c1a0d359cbfad6496341e7c819c7fea63db9588857065ed", size = 146408, upload-time = "2025-09-09T15:17:43.74Z" }, + { url = "https://files.pythonhosted.org/packages/3c/c8/516901e46a1a73b3a75e87a35f3a3a4fe085f1214f37d954c9d7e782bd6d/pycares-4.11.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:31b85ad00422b38f426e5733a71dfb7ee7eb65a99ea328c508d4f552b1760dc8", size = 142371, upload-time = "2025-09-09T15:17:45.186Z" }, + { url = "https://files.pythonhosted.org/packages/ac/99/c3fba0aa575f331ebed91f87ba960ffbe0849211cdf103ab275bc0107ac6/pycares-4.11.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:cdac992206756b024b371760c55719eb5cd9d6b2cb25a8d5a04ae1b0ff426232", size = 647504, upload-time = "2025-09-09T15:17:46.503Z" }, + { url = "https://files.pythonhosted.org/packages/5c/e4/1cdc3ec9c92f8069ec18c58b016b2df7c44a088e2849f37ed457554961aa/pycares-4.11.0-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:ffb22cee640bc12ee0e654eba74ecfb59e2e0aebc5bccc3cc7ef92f487008af7", size = 697122, upload-time = "2025-09-09T15:17:47.772Z" }, + { url = 
"https://files.pythonhosted.org/packages/9c/d5/bd8f370b97bb73e5bdd55dc2a78e18d6f49181cf77e88af0599d16f5c073/pycares-4.11.0-cp314-cp314t-manylinux_2_28_s390x.whl", hash = "sha256:00538826d2eaf4a0e4becb0753b0ac8d652334603c445c9566c9eb273657eb4c", size = 687543, upload-time = "2025-09-09T15:17:49.183Z" }, + { url = "https://files.pythonhosted.org/packages/33/38/49b77b9cf5dffc0b1fdd86656975c3bc1a58b79bdc883a9ef749b17a013c/pycares-4.11.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:29daa36548c04cdcd1a78ae187a4b7b003f0b357a2f4f1f98f9863373eedc759", size = 649565, upload-time = "2025-09-09T15:17:51.03Z" }, + { url = "https://files.pythonhosted.org/packages/3c/23/f6d57bfb99d00a6a7363f95c8d3a930fe82a868d9de24c64c8048d66f16a/pycares-4.11.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:cf306f3951740d7bed36149a6d8d656a7d5432dd4bbc6af3bb6554361fc87401", size = 631242, upload-time = "2025-09-09T15:17:52.298Z" }, + { url = "https://files.pythonhosted.org/packages/33/a2/7b9121c71cfe06a8474e221593f83a78176fae3b79e5853d2dfd13ab01cc/pycares-4.11.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:386da2581db4ea2832629e275c061103b0be32f9391c5dfaea7f6040951950ad", size = 680304, upload-time = "2025-09-09T15:17:53.638Z" }, + { url = "https://files.pythonhosted.org/packages/5b/07/dfe76807f637d8b80e1a59dfc4a1bceabdd0205a45b2ebf78b415ae72af3/pycares-4.11.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:45d3254a694459fdb0640ef08724ca9d4b4f6ff6d7161c9b526d7d2e2111379e", size = 661039, upload-time = "2025-09-09T15:17:55.024Z" }, + { url = "https://files.pythonhosted.org/packages/b2/9b/55d50c5acd46cbe95d0da27740a83e721d89c0ce7e42bff9891a9f29a855/pycares-4.11.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:eddf5e520bb88b23b04ac1f28f5e9a7c77c718b8b4af3a4a7a2cc4a600f34502", size = 637560, upload-time = "2025-09-09T15:17:56.492Z" }, + { url = "https://files.pythonhosted.org/packages/1f/79/2b2e723d1b929dbe7f99e80a56abb29a4f86988c1f73195d960d706b1629/pycares-4.11.0-cp314-cp314t-win32.whl", hash = "sha256:8a75a406432ce39ce0ca41edff7486df6c970eb0fe5cfbe292f195a6b8654461", size = 122235, upload-time = "2025-09-09T15:17:57.576Z" }, + { url = "https://files.pythonhosted.org/packages/93/fe/bf3b3ed9345a38092e72cd9890a5df5c2349fc27846a714d823a41f0ee27/pycares-4.11.0-cp314-cp314t-win_amd64.whl", hash = "sha256:3784b80d797bcc2ff2bf3d4b27f46d8516fe1707ff3b82c2580dc977537387f9", size = 148575, upload-time = "2025-09-09T15:17:58.699Z" }, + { url = "https://files.pythonhosted.org/packages/ce/20/c0c5cfcf89725fe533b27bc5f714dc4efa8e782bf697c36f9ddf04ba975d/pycares-4.11.0-cp314-cp314t-win_arm64.whl", hash = "sha256:afc6503adf8b35c21183b9387be64ca6810644ef54c9ef6c99d1d5635c01601b", size = 119690, upload-time = "2025-09-09T15:17:59.809Z" }, +] + +[[package]] +name = "pycparser" +version = "2.23" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, +] + +[[package]] +name = "pydantic" +version = "2.12.5" +source = { 
registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, + { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, + { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, + { url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" }, + { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size 
= 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, + { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, + { url = "https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, + { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, + { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, + { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, + { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, + { url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, + { url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, 
upload-time = "2025-11-04T13:40:33.544Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, + { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" }, + { url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" }, + { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, + { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, + { url = "https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, + { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, + { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 
2119622, upload-time = "2025-11-04T13:40:56.68Z" }, + { url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, + { url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, + { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" }, + { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, + { url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, + { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, + { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = 
"sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, + { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, + { url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, + { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" }, + { url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, + { url = 
"https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" }, + { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, + { url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, + { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, + { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, + { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, + { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, +] + +[[package]] +name = "pydantic-settings" +version = "2.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "typing-inspection" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/43/4b/ac7e0aae12027748076d72a8764ff1c9d82ca75a7a52622e67ed3f765c54/pydantic_settings-2.12.0.tar.gz", hash = "sha256:005538ef951e3c2a68e1c08b292b5f2e71490def8589d4221b95dab00dafcfd0", size = 194184, upload-time = "2025-11-10T14:25:47.013Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/60/5d4751ba3f4a40a6891f24eec885f51afd78d208498268c734e256fb13c4/pydantic_settings-2.12.0-py3-none-any.whl", hash = "sha256:fddb9fd99a5b18da837b29710391e945b1e30c135477f484084ee513adb93809", size = 51880, upload-time = "2025-11-10T14:25:45.546Z" }, +] + +[[package]] +name = "pydot" +version = "4.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyparsing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/50/35/b17cb89ff865484c6a20ef46bf9d95a5f07328292578de0b295f4a6beec2/pydot-4.0.1.tar.gz", hash = "sha256:c2148f681c4a33e08bf0e26a9e5f8e4099a82e0e2a068098f32ce86577364ad5", size = 162594, upload-time = "2025-06-17T20:09:56.454Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/32/a7125fb28c4261a627f999d5fb4afff25b523800faed2c30979949d6facd/pydot-4.0.1-py3-none-any.whl", hash = "sha256:869c0efadd2708c0be1f916eb669f3d664ca684bc57ffb7ecc08e70d5e93fee6", size = 37087, upload-time = "2025-06-17T20:09:55.25Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pymdown-extensions" +version = "10.19.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown" }, + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/2d/9f30cee56d4d6d222430d401e85b0a6a1ae229819362f5786943d1a8c03b/pymdown_extensions-10.19.1.tar.gz", hash = "sha256:4969c691009a389fb1f9712dd8e7bd70dcc418d15a0faf70acb5117d022f7de8", size = 847839, upload-time = "2025-12-14T17:25:24.42Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/35/b763e8fbcd51968329b9adc52d188fc97859f85f2ee15fe9f379987d99c5/pymdown_extensions-10.19.1-py3-none-any.whl", hash = "sha256:e8698a66055b1dc0dca2a7f2c9d0ea6f5faa7834a9c432e3535ab96c0c4e509b", size = 266693, upload-time = "2025-12-14T17:25:22.999Z" }, +] + +[[package]] +name = "pyparsing" +version = "3.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/33/c1/1d9de9aeaa1b89b0186e5fe23294ff6517fce1bc69149185577cd31016b2/pyparsing-3.3.1.tar.gz", hash = "sha256:47fad0f17ac1e2cad3de3b458570fbc9b03560aa029ed5e16ee5554da9a2251c", size = 1550512, upload-time = "2025-12-23T03:14:04.391Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8b/40/2614036cdd416452f5bf98ec037f38a1afb17f327cb8e6b652d4729e0af8/pyparsing-3.3.1-py3-none-any.whl", hash = "sha256:023b5e7e5520ad96642e2c6db4cb683d3970bd640cdf7115049a6e9c3682df82", size = 121793, upload-time = "2025-12-23T03:14:02.103Z" }, +] + 
+[[package]] +name = "pyproject-api" +version = "1.10.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/45/7b/c0e1333b61d41c69e59e5366e727b18c4992688caf0de1be10b3e5265f6b/pyproject_api-1.10.0.tar.gz", hash = "sha256:40c6f2d82eebdc4afee61c773ed208c04c19db4c4a60d97f8d7be3ebc0bbb330", size = 22785, upload-time = "2025-10-09T19:12:27.21Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/cc/cecf97be298bee2b2a37dd360618c819a2a7fd95251d8e480c1f0eb88f3b/pyproject_api-1.10.0-py3-none-any.whl", hash = "sha256:8757c41a79c0f4ab71b99abed52b97ecf66bd20b04fa59da43b5840bac105a09", size = 13218, upload-time = "2025-10-09T19:12:24.428Z" }, +] + +[[package]] +name = "pyproject-hooks" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/82/28175b2414effca1cdac8dc99f76d660e7a4fb0ceefa4b4ab8f5f6742925/pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8", size = 19228, upload-time = "2024-09-29T09:24:13.293Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/24/12818598c362d7f300f18e74db45963dbcb85150324092410c8b49405e42/pyproject_hooks-1.2.0-py3-none-any.whl", hash = "sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913", size = 10216, upload-time = "2024-09-29T09:24:11.978Z" }, +] + +[[package]] +name = "pytest" +version = "9.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/90/2c/8af215c0f776415f3590cac4f9086ccefd6fd463befeae41cd4d3f193e5a/pytest_asyncio-1.3.0.tar.gz", hash = "sha256:d7f52f36d231b80ee124cd216ffb19369aa168fc10095013c6b014a34d3ee9e5", size = 50087, upload-time = "2025-11-10T16:07:47.256Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/35/f8b19922b6a25bc0880171a2f1a003eaeb93657475193ab516fd87cac9da/pytest_asyncio-1.3.0-py3-none-any.whl", hash = "sha256:611e26147c7f77640e6d0a92a38ed17c3e9848063698d5c93d5aa7aa11cebff5", size = 15075, upload-time = "2025-11-10T16:07:45.537Z" }, +] + +[[package]] +name = "pytest-cov" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coverage" }, + { name = "pluggy" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5e/f7/c933acc76f5208b3b00089573cf6a2bc26dc80a8aece8f52bb7d6b1855ca/pytest_cov-7.0.0.tar.gz", hash = 
"sha256:33c97eda2e049a0c5298e91f519302a1334c26ac65c1a483d6206fd458361af1", size = 54328, upload-time = "2025-09-09T10:57:02.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/49/1377b49de7d0c1ce41292161ea0f721913fa8722c19fb9c1e3aa0367eecb/pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861", size = 22424, upload-time = "2025-09-09T10:57:00.695Z" }, +] + +[[package]] +name = "pytest-xdist" +version = "3.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "execnet" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/78/b4/439b179d1ff526791eb921115fca8e44e596a13efeda518b9d845a619450/pytest_xdist-3.8.0.tar.gz", hash = "sha256:7e578125ec9bc6050861aa93f2d59f1d8d085595d6551c2c90b6f4fad8d3a9f1", size = 88069, upload-time = "2025-07-01T13:30:59.346Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/31/d4e37e9e550c2b92a9cbc2e4d0b7420a27224968580b5a447f420847c975/pytest_xdist-3.8.0-py3-none-any.whl", hash = "sha256:202ca578cfeb7370784a8c33d6d05bc6e13b4f25b5053c30a152269fd10f0b88", size = 46396, upload-time = "2025-07-01T13:30:56.632Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f0/26/19cadc79a718c5edbec86fd4919a6b6d3f681039a2f6d66d14be94e75fb9/python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6", size = 44221, upload-time = "2025-10-26T15:12:10.434Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/1b/a298b06749107c305e1fe0f814c6c74aea7b2f1e10989cb30f544a1b3253/python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61", size = 21230, upload-time = "2025-10-26T15:12:09.109Z" }, +] + +[[package]] +name = "pytokens" +version = "0.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4e/8d/a762be14dae1c3bf280202ba3172020b2b0b4c537f94427435f19c413b72/pytokens-0.3.0.tar.gz", hash = "sha256:2f932b14ed08de5fcf0b391ace2642f858f1394c0857202959000b68ed7a458a", size = 17644, upload-time = "2025-11-05T13:36:35.34Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/25/d9db8be44e205a124f6c98bc0324b2bb149b7431c53877fc6d1038dddaf5/pytokens-0.3.0-py3-none-any.whl", hash = "sha256:95b2b5eaf832e469d141a378872480ede3f251a5a5041b8ec6e581d3ac71bbf3", size = 12195, upload-time = "2025-11-05T13:36:33.183Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/33/422b98d2195232ca1826284a76852ad5a86fe23e31b009c9886b2d0fb8b2/pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196", size = 182063, upload-time = "2025-09-25T21:32:11.445Z" }, + { url = "https://files.pythonhosted.org/packages/89/a0/6cf41a19a1f2f3feab0e9c0b74134aa2ce6849093d5517a0c550fe37a648/pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0", size = 173973, upload-time = "2025-09-25T21:32:12.492Z" }, + { url = "https://files.pythonhosted.org/packages/ed/23/7a778b6bd0b9a8039df8b1b1d80e2e2ad78aa04171592c8a5c43a56a6af4/pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28", size = 775116, upload-time = "2025-09-25T21:32:13.652Z" }, + { url = "https://files.pythonhosted.org/packages/65/30/d7353c338e12baef4ecc1b09e877c1970bd3382789c159b4f89d6a70dc09/pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c", size = 844011, upload-time = "2025-09-25T21:32:15.21Z" }, + { url = "https://files.pythonhosted.org/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc", size = 807870, upload-time = "2025-09-25T21:32:16.431Z" }, + { url = "https://files.pythonhosted.org/packages/05/c0/b3be26a015601b822b97d9149ff8cb5ead58c66f981e04fedf4e762f4bd4/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e", size = 761089, upload-time = "2025-09-25T21:32:17.56Z" }, + { url = "https://files.pythonhosted.org/packages/be/8e/98435a21d1d4b46590d5459a22d88128103f8da4c2d4cb8f14f2a96504e1/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea", size = 790181, upload-time = "2025-09-25T21:32:18.834Z" }, + { url = "https://files.pythonhosted.org/packages/74/93/7baea19427dcfbe1e5a372d81473250b379f04b1bd3c4c5ff825e2327202/pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5", size = 137658, upload-time = "2025-09-25T21:32:20.209Z" }, + { url = "https://files.pythonhosted.org/packages/86/bf/899e81e4cce32febab4fb42bb97dcdf66bc135272882d1987881a4b519e9/pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b", size = 154003, upload-time = "2025-09-25T21:32:21.167Z" }, + { url = "https://files.pythonhosted.org/packages/1a/08/67bd04656199bbb51dbed1439b7f27601dfb576fb864099c7ef0c3e55531/pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd", size = 140344, upload-time = "2025-09-25T21:32:22.617Z" }, + { url = 
"https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" }, + { url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" }, + { url = "https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" }, + { url = "https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" }, + { url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" }, + { url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" }, + { url = "https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" }, + { url = "https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" }, + { url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" }, + { url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" }, + { url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" }, + { url = 
"https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" }, + { url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" }, + { url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" }, + { url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" }, + { url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" }, + { url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" }, + { url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" }, + { url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" }, + { url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = 
"2025-09-25T21:32:48.83Z" }, + { url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" }, + { url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" }, + { url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" }, + { url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" }, + { url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" }, + { url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" }, +] + +[[package]] +name = "pyyaml-env-tag" +version = "1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/2e/79c822141bfd05a853236b504869ebc6b70159afc570e1d5a20641782eaa/pyyaml_env_tag-1.1.tar.gz", hash = "sha256:2eb38b75a2d21ee0475d6d97ec19c63287a7e140231e4214969d0eac923cd7ff", size = 5737, upload-time = "2025-05-13T15:24:01.64Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/11/432f32f8097b03e3cd5fe57e88efb685d964e2e5178a48ed61e841f7fdce/pyyaml_env_tag-1.1-py3-none-any.whl", hash = "sha256:17109e1a528561e32f026364712fee1264bc2ea6715120891174ed1b980d2e04", size = 4722, upload-time = "2025-05-13T15:23:59.629Z" }, +] + +[[package]] +name = "redis" +version = "7.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/c8/983d5c6579a411d8a99bc5823cc5712768859b5ce2c8afe1a65b37832c81/redis-7.1.0.tar.gz", hash = "sha256:b1cc3cfa5a2cb9c2ab3ba700864fb0ad75617b41f01352ce5779dabf6d5f9c3c", size = 4796669, upload-time = "2025-11-19T15:54:39.961Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/f0/8956f8a86b20d7bb9d6ac0187cf4cd54d8065bc9a1a09eb8011d4d326596/redis-7.1.0-py3-none-any.whl", hash = "sha256:23c52b208f92b56103e17c5d06bdc1a6c2c0b3106583985a76a18f83b265de2b", size = 354159, upload-time = "2025-11-19T15:54:38.064Z" }, +] + +[package.optional-dependencies] +hiredis = [ + { name = "hiredis" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { 
registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "rich" +version = "14.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fb/d2/8920e102050a0de7bfabeb4c4614a49248cf8d5d7a8d01885fbb24dc767a/rich-14.2.0.tar.gz", hash = "sha256:73ff50c7c0c1c77c8243079283f4edb376f0f6442433aecb8ce7e6d0b92d1fe4", size = 219990, upload-time = "2025-10-09T14:16:53.064Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/7a/b0178788f8dc6cafce37a212c99565fa1fe7872c70c6c9c1e1a372d9d88f/rich-14.2.0-py3-none-any.whl", hash = "sha256:76bc51fe2e57d2b1be1f96c524b890b816e334ab4c1e45888799bfaab0021edd", size = 243393, upload-time = "2025-10-09T14:16:51.245Z" }, +] + +[[package]] +name = "ruff" +version = "0.14.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/57/08/52232a877978dd8f9cf2aeddce3e611b40a63287dfca29b6b8da791f5e8d/ruff-0.14.10.tar.gz", hash = "sha256:9a2e830f075d1a42cd28420d7809ace390832a490ed0966fe373ba288e77aaf4", size = 5859763, upload-time = "2025-12-18T19:28:57.98Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/01/933704d69f3f05ee16ef11406b78881733c186fe14b6a46b05cfcaf6d3b2/ruff-0.14.10-py3-none-linux_armv6l.whl", hash = "sha256:7a3ce585f2ade3e1f29ec1b92df13e3da262178df8c8bdf876f48fa0e8316c49", size = 13527080, upload-time = "2025-12-18T19:29:25.642Z" }, + { url = "https://files.pythonhosted.org/packages/df/58/a0349197a7dfa603ffb7f5b0470391efa79ddc327c1e29c4851e85b09cc5/ruff-0.14.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:674f9be9372907f7257c51f1d4fc902cb7cf014b9980152b802794317941f08f", size = 13797320, upload-time = "2025-12-18T19:29:02.571Z" }, + { url = "https://files.pythonhosted.org/packages/7b/82/36be59f00a6082e38c23536df4e71cdbc6af8d7c707eade97fcad5c98235/ruff-0.14.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d85713d522348837ef9df8efca33ccb8bd6fcfc86a2cde3ccb4bc9d28a18003d", size = 12918434, upload-time = "2025-12-18T19:28:51.202Z" }, + { url = "https://files.pythonhosted.org/packages/a6/00/45c62a7f7e34da92a25804f813ebe05c88aa9e0c25e5cb5a7d23dd7450e3/ruff-0.14.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6987ebe0501ae4f4308d7d24e2d0fe3d7a98430f5adfd0f1fead050a740a3a77", size = 13371961, upload-time = "2025-12-18T19:29:04.991Z" }, + { url = "https://files.pythonhosted.org/packages/40/31/a5906d60f0405f7e57045a70f2d57084a93ca7425f22e1d66904769d1628/ruff-0.14.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:16a01dfb7b9e4eee556fbfd5392806b1b8550c9b4a9f6acd3dbe6812b193c70a", size = 13275629, upload-time = "2025-12-18T19:29:21.381Z" }, + { url = 
"https://files.pythonhosted.org/packages/3e/60/61c0087df21894cf9d928dc04bcd4fb10e8b2e8dca7b1a276ba2155b2002/ruff-0.14.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7165d31a925b7a294465fa81be8c12a0e9b60fb02bf177e79067c867e71f8b1f", size = 14029234, upload-time = "2025-12-18T19:29:00.132Z" }, + { url = "https://files.pythonhosted.org/packages/44/84/77d911bee3b92348b6e5dab5a0c898d87084ea03ac5dc708f46d88407def/ruff-0.14.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c561695675b972effb0c0a45db233f2c816ff3da8dcfbe7dfc7eed625f218935", size = 15449890, upload-time = "2025-12-18T19:28:53.573Z" }, + { url = "https://files.pythonhosted.org/packages/e9/36/480206eaefa24a7ec321582dda580443a8f0671fdbf6b1c80e9c3e93a16a/ruff-0.14.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4bb98fcbbc61725968893682fd4df8966a34611239c9fd07a1f6a07e7103d08e", size = 15123172, upload-time = "2025-12-18T19:29:23.453Z" }, + { url = "https://files.pythonhosted.org/packages/5c/38/68e414156015ba80cef5473d57919d27dfb62ec804b96180bafdeaf0e090/ruff-0.14.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f24b47993a9d8cb858429e97bdf8544c78029f09b520af615c1d261bf827001d", size = 14460260, upload-time = "2025-12-18T19:29:27.808Z" }, + { url = "https://files.pythonhosted.org/packages/b3/19/9e050c0dca8aba824d67cc0db69fb459c28d8cd3f6855b1405b3f29cc91d/ruff-0.14.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59aabd2e2c4fd614d2862e7939c34a532c04f1084476d6833dddef4afab87e9f", size = 14229978, upload-time = "2025-12-18T19:29:11.32Z" }, + { url = "https://files.pythonhosted.org/packages/51/eb/e8dd1dd6e05b9e695aa9dd420f4577debdd0f87a5ff2fedda33c09e9be8c/ruff-0.14.10-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:213db2b2e44be8625002dbea33bb9c60c66ea2c07c084a00d55732689d697a7f", size = 14338036, upload-time = "2025-12-18T19:29:09.184Z" }, + { url = "https://files.pythonhosted.org/packages/6a/12/f3e3a505db7c19303b70af370d137795fcfec136d670d5de5391e295c134/ruff-0.14.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b914c40ab64865a17a9a5b67911d14df72346a634527240039eb3bd650e5979d", size = 13264051, upload-time = "2025-12-18T19:29:13.431Z" }, + { url = "https://files.pythonhosted.org/packages/08/64/8c3a47eaccfef8ac20e0484e68e0772013eb85802f8a9f7603ca751eb166/ruff-0.14.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1484983559f026788e3a5c07c81ef7d1e97c1c78ed03041a18f75df104c45405", size = 13283998, upload-time = "2025-12-18T19:29:06.994Z" }, + { url = "https://files.pythonhosted.org/packages/12/84/534a5506f4074e5cc0529e5cd96cfc01bb480e460c7edf5af70d2bcae55e/ruff-0.14.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:c70427132db492d25f982fffc8d6c7535cc2fd2c83fc8888f05caaa248521e60", size = 13601891, upload-time = "2025-12-18T19:28:55.811Z" }, + { url = "https://files.pythonhosted.org/packages/0d/1e/14c916087d8598917dbad9b2921d340f7884824ad6e9c55de948a93b106d/ruff-0.14.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5bcf45b681e9f1ee6445d317ce1fa9d6cba9a6049542d1c3d5b5958986be8830", size = 14336660, upload-time = "2025-12-18T19:29:16.531Z" }, + { url = "https://files.pythonhosted.org/packages/f2/1c/d7b67ab43f30013b47c12b42d1acd354c195351a3f7a1d67f59e54227ede/ruff-0.14.10-py3-none-win32.whl", hash = "sha256:104c49fc7ab73f3f3a758039adea978869a918f31b73280db175b43a2d9b51d6", size = 13196187, upload-time = "2025-12-18T19:29:19.006Z" }, + { url = 
"https://files.pythonhosted.org/packages/fb/9c/896c862e13886fae2af961bef3e6312db9ebc6adc2b156fe95e615dee8c1/ruff-0.14.10-py3-none-win_amd64.whl", hash = "sha256:466297bd73638c6bdf06485683e812db1c00c7ac96d4ddd0294a338c62fdc154", size = 14661283, upload-time = "2025-12-18T19:29:30.16Z" }, + { url = "https://files.pythonhosted.org/packages/74/31/b0e29d572670dca3674eeee78e418f20bdf97fa8aa9ea71380885e175ca0/ruff-0.14.10-py3-none-win_arm64.whl", hash = "sha256:e51d046cf6dda98a4633b8a8a771451107413b0f07183b2bef03f075599e44e6", size = 13729839, upload-time = "2025-12-18T19:28:48.636Z" }, +] + +[[package]] +name = "setuptools" +version = "80.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "smmap" +version = "5.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/44/cd/a040c4b3119bbe532e5b0732286f805445375489fceaec1f48306068ee3b/smmap-5.0.2.tar.gz", hash = "sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5", size = 22329, upload-time = "2025-01-02T07:14:40.909Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/be/d09147ad1ec7934636ad912901c5fd7667e1c858e19d355237db0d0cd5e4/smmap-5.0.2-py3-none-any.whl", hash = "sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e", size = 24303, upload-time = "2025-01-02T07:14:38.724Z" }, +] + +[[package]] +name = "soupsieve" +version = "2.8.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/89/23/adf3796d740536d63a6fbda113d07e60c734b6ed5d3058d1e47fc0495e47/soupsieve-2.8.1.tar.gz", hash = "sha256:4cf733bc50fa805f5df4b8ef4740fc0e0fa6218cf3006269afd3f9d6d80fd350", size = 117856, upload-time = "2025-12-18T13:50:34.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/f3/b67d6ea49ca9154453b6d70b34ea22f3996b9fa55da105a79d8732227adc/soupsieve-2.8.1-py3-none-any.whl", hash = "sha256:a11fe2a6f3d76ab3cf2de04eb339c1be5b506a8a47f2ceb6d139803177f85434", size = 36710, upload-time = "2025-12-18T13:50:33.267Z" }, +] + +[[package]] +name = "strenum" +version = "0.4.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/85/ad/430fb60d90e1d112a62ff57bdd1f286ec73a2a0331272febfddd21f330e1/StrEnum-0.4.15.tar.gz", hash = "sha256:878fb5ab705442070e4dd1929bb5e2249511c0bcf2b0eeacf3bcd80875c82eff", size = 23384, upload-time = "2023-06-29T22:02:58.399Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/69/297302c5f5f59c862faa31e6cb9a4cd74721cd1e052b38e464c5b402df8b/StrEnum-0.4.15-py3-none-any.whl", hash = "sha256:a30cda4af7cc6b5bf52c8055bc4bf4b2b6b14a93b574626da33df53cf7740659", size = 8851, upload-time = "2023-06-29T22:02:56.947Z" }, +] + +[[package]] +name = "tach" +version = "0.32.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "gitpython" }, + { name = "networkx" }, + { name = "prompt-toolkit" }, + { name = "pydot" }, + { name = "pyyaml" }, + { name = "rich" }, + { name = "tomli" }, + { name = "tomli-w" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ee/5a/732029a738278d960e5e33c9b76e0431243ac1024f339a19c55668088b92/tach-0.32.2.tar.gz", hash = "sha256:51fff9132e33f7c7d7e59c4274be6ad5d546f8123150f6bf1ec15dcddcbd795a", size = 775967, upload-time = "2025-12-07T03:39:29.033Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/67/d9/1ffebc46538fb3fe55293cb2de2894efa2f267e74d1704fe84c81d968273/tach-0.32.2-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:f50be337a3fff4595dc0f40e7d1578f91a5ac9e8cca5d3dd7fc987566a3e7305", size = 4092084, upload-time = "2025-12-07T03:39:22.411Z" }, + { url = "https://files.pythonhosted.org/packages/5a/76/19a58ad1dfff0f6960f3f60ec4f15d329711d61c366b9cd43dfeb747279b/tach-0.32.2-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bcc8c7e9d74be4f0e74df6bc5d2684856f7e0e9ecc04615c5e7e5b81dc6ef5a", size = 3982942, upload-time = "2025-12-07T03:39:20.795Z" }, + { url = "https://files.pythonhosted.org/packages/48/d6/74315f5b810204e5f405a59de195148b28fd2d21f03955a437ed3fd906af/tach-0.32.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6c2b0dcb415741c028f041e98e3c640229059664238a2f9cb56269497524068", size = 4314151, upload-time = "2025-12-07T03:39:14.259Z" }, + { url = "https://files.pythonhosted.org/packages/cc/42/8c02e28f47b8027e56677a59c5a64eed035a6811670ee9e5fc06eeda124f/tach-0.32.2-cp37-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d05187e9dd3770d75435e6b6aa1d7e1dac03d4b2b601683b410922a11388d840", size = 4249524, upload-time = "2025-12-07T03:39:17.88Z" }, + { url = "https://files.pythonhosted.org/packages/82/6b/a838ec61ec2f4d512b2583c2c67a54c9fba8fe9af4ac9ff214dba61a986a/tach-0.32.2-cp37-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:21e8d219e228a840036cec5c3e115aa81a75a6f55c2e603c27d8f9208e6a69be", size = 4644336, upload-time = "2025-12-07T03:39:32.866Z" }, + { url = "https://files.pythonhosted.org/packages/36/e2/a2f12e2bb6b76e3b8c15232aaa1dbd06f062e814f4368394c285e9445aca/tach-0.32.2-cp37-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:95014a154bcd340e421d52e438a238fa22d7d17caeddbe4654c083d92b6874b8", size = 5171182, upload-time = "2025-12-07T03:39:24.167Z" }, + { url = "https://files.pythonhosted.org/packages/8c/72/19bbb900a06c069fedc4a53e2dd4686e9d1d029dd398da646951930dbf41/tach-0.32.2-cp37-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:664722b9f8678e088998b847be1cd62f41272ade1c3cde11a02c9ef6afb41aaa", size = 4333057, upload-time = "2025-12-07T03:39:27.493Z" }, + { url = 
"https://files.pythonhosted.org/packages/b1/dc/02150ec3085627f3d1d14e1a40198defa13fa243ea8ab73164cc3e75847b/tach-0.32.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f93dc75f46ef70b755e24d434c7e4ef9cc0334cf9952d3909f474ab4c36d7f52", size = 4416054, upload-time = "2025-12-07T03:39:37.701Z" }, + { url = "https://files.pythonhosted.org/packages/b6/60/6ceef846a861dab34d25c9cc1c6378f876e74a2dfd1819fc923a1ea08cd9/tach-0.32.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:97a4b279c932a752bcf8d4d7d23b3035542973e5cb1ac4871a75eda81e51c4ec", size = 4474819, upload-time = "2025-12-07T03:39:16.374Z" }, + { url = "https://files.pythonhosted.org/packages/e0/95/53876b1cbfee1fd18acd1638e76f4823a78dcf9c8fb4e0b2970ad83a013a/tach-0.32.2-cp37-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:db53ea81ae7a4088a6d13081711fadbbed42281abe783d53403aaf406ca19f24", size = 4513515, upload-time = "2025-12-07T03:39:19.494Z" }, + { url = "https://files.pythonhosted.org/packages/aa/79/8dd84143069bc915eeffb557ff3d85e6343ce1b237a287f65ddce9c07b01/tach-0.32.2-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:617aa03136ede042d38a36037144270998acdf5666d81a687e409ccf95bc1f12", size = 4610007, upload-time = "2025-12-07T03:39:35.92Z" }, + { url = "https://files.pythonhosted.org/packages/57/e3/e364c58cb8d367b931d010e5ba63757b9dca340bcf32c9d87963aef05947/tach-0.32.2-cp37-abi3-musllinux_1_2_ppc64le.whl", hash = "sha256:879b17febf5eca7b3dbee7f98f0929af05391ba5a085bc80cc8a22158eb0d15e", size = 5387603, upload-time = "2025-12-07T03:39:25.837Z" }, + { url = "https://files.pythonhosted.org/packages/bd/f7/d827bc7800986f6b1955408233ccb31630b443e8cd7f53b1d6b74d0daff0/tach-0.32.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d4b4894e2855245928ed20bb5044170d98e374df84570b2cf3131fb929f99a95", size = 4589805, upload-time = "2025-12-07T03:39:40.639Z" }, + { url = "https://files.pythonhosted.org/packages/ce/3b/397e266a7a0ded116834adb87d2f2591a7f8e3a74359ab05365feff9c1a9/tach-0.32.2-cp37-abi3-win32.whl", hash = "sha256:0cbf176580107ed6d2df79bafc8172165ac83855edc64edd948f102139bf2c93", size = 3434263, upload-time = "2025-12-07T03:39:31.572Z" }, + { url = "https://files.pythonhosted.org/packages/8d/be/43b479130bd96befde1bcd042dd8cd8b5dffbeef0044016f93868a484f18/tach-0.32.2-cp37-abi3-win_amd64.whl", hash = "sha256:b028e1aa7e81087b695afed7501cfab0289ba150165913926b6a1e6de8f0c7f3", size = 3754498, upload-time = "2025-12-07T03:39:30.184Z" }, +] + +[[package]] +name = "tomli" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/ed/3f73f72945444548f33eba9a87fc7a6e969915e7b1acc8260b30e1f76a2f/tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549", size = 17392, upload-time = "2025-10-08T22:01:47.119Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/b7/40f36368fcabc518bb11c8f06379a0fd631985046c038aca08c6d6a43c6e/tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac", size = 154891, upload-time = "2025-10-08T22:01:09.082Z" }, + { url = "https://files.pythonhosted.org/packages/f9/3f/d9dd692199e3b3aab2e4e4dd948abd0f790d9ded8cd10cbaae276a898434/tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22", size = 148796, upload-time = "2025-10-08T22:01:10.266Z" }, + { url = 
"https://files.pythonhosted.org/packages/60/83/59bff4996c2cf9f9387a0f5a3394629c7efa5ef16142076a23a90f1955fa/tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f", size = 242121, upload-time = "2025-10-08T22:01:11.332Z" }, + { url = "https://files.pythonhosted.org/packages/45/e5/7c5119ff39de8693d6baab6c0b6dcb556d192c165596e9fc231ea1052041/tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52", size = 250070, upload-time = "2025-10-08T22:01:12.498Z" }, + { url = "https://files.pythonhosted.org/packages/45/12/ad5126d3a278f27e6701abde51d342aa78d06e27ce2bb596a01f7709a5a2/tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8", size = 245859, upload-time = "2025-10-08T22:01:13.551Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a1/4d6865da6a71c603cfe6ad0e6556c73c76548557a8d658f9e3b142df245f/tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6", size = 250296, upload-time = "2025-10-08T22:01:14.614Z" }, + { url = "https://files.pythonhosted.org/packages/a0/b7/a7a7042715d55c9ba6e8b196d65d2cb662578b4d8cd17d882d45322b0d78/tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876", size = 97124, upload-time = "2025-10-08T22:01:15.629Z" }, + { url = "https://files.pythonhosted.org/packages/06/1e/f22f100db15a68b520664eb3328fb0ae4e90530887928558112c8d1f4515/tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878", size = 107698, upload-time = "2025-10-08T22:01:16.51Z" }, + { url = "https://files.pythonhosted.org/packages/89/48/06ee6eabe4fdd9ecd48bf488f4ac783844fd777f547b8d1b61c11939974e/tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b", size = 154819, upload-time = "2025-10-08T22:01:17.964Z" }, + { url = "https://files.pythonhosted.org/packages/f1/01/88793757d54d8937015c75dcdfb673c65471945f6be98e6a0410fba167ed/tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae", size = 148766, upload-time = "2025-10-08T22:01:18.959Z" }, + { url = "https://files.pythonhosted.org/packages/42/17/5e2c956f0144b812e7e107f94f1cc54af734eb17b5191c0bbfb72de5e93e/tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b", size = 240771, upload-time = "2025-10-08T22:01:20.106Z" }, + { url = "https://files.pythonhosted.org/packages/d5/f4/0fbd014909748706c01d16824eadb0307115f9562a15cbb012cd9b3512c5/tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf", size = 248586, upload-time = "2025-10-08T22:01:21.164Z" }, + { url = "https://files.pythonhosted.org/packages/30/77/fed85e114bde5e81ecf9bc5da0cc69f2914b38f4708c80ae67d0c10180c5/tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f", size = 244792, upload-time = "2025-10-08T22:01:22.417Z" }, + 
{ url = "https://files.pythonhosted.org/packages/55/92/afed3d497f7c186dc71e6ee6d4fcb0acfa5f7d0a1a2878f8beae379ae0cc/tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05", size = 248909, upload-time = "2025-10-08T22:01:23.859Z" }, + { url = "https://files.pythonhosted.org/packages/f8/84/ef50c51b5a9472e7265ce1ffc7f24cd4023d289e109f669bdb1553f6a7c2/tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606", size = 96946, upload-time = "2025-10-08T22:01:24.893Z" }, + { url = "https://files.pythonhosted.org/packages/b2/b7/718cd1da0884f281f95ccfa3a6cc572d30053cba64603f79d431d3c9b61b/tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999", size = 107705, upload-time = "2025-10-08T22:01:26.153Z" }, + { url = "https://files.pythonhosted.org/packages/19/94/aeafa14a52e16163008060506fcb6aa1949d13548d13752171a755c65611/tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e", size = 154244, upload-time = "2025-10-08T22:01:27.06Z" }, + { url = "https://files.pythonhosted.org/packages/db/e4/1e58409aa78eefa47ccd19779fc6f36787edbe7d4cd330eeeedb33a4515b/tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3", size = 148637, upload-time = "2025-10-08T22:01:28.059Z" }, + { url = "https://files.pythonhosted.org/packages/26/b6/d1eccb62f665e44359226811064596dd6a366ea1f985839c566cd61525ae/tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc", size = 241925, upload-time = "2025-10-08T22:01:29.066Z" }, + { url = "https://files.pythonhosted.org/packages/70/91/7cdab9a03e6d3d2bb11beae108da5bdc1c34bdeb06e21163482544ddcc90/tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0", size = 249045, upload-time = "2025-10-08T22:01:31.98Z" }, + { url = "https://files.pythonhosted.org/packages/15/1b/8c26874ed1f6e4f1fcfeb868db8a794cbe9f227299402db58cfcc858766c/tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879", size = 245835, upload-time = "2025-10-08T22:01:32.989Z" }, + { url = "https://files.pythonhosted.org/packages/fd/42/8e3c6a9a4b1a1360c1a2a39f0b972cef2cc9ebd56025168c4137192a9321/tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005", size = 253109, upload-time = "2025-10-08T22:01:34.052Z" }, + { url = "https://files.pythonhosted.org/packages/22/0c/b4da635000a71b5f80130937eeac12e686eefb376b8dee113b4a582bba42/tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463", size = 97930, upload-time = "2025-10-08T22:01:35.082Z" }, + { url = "https://files.pythonhosted.org/packages/b9/74/cb1abc870a418ae99cd5c9547d6bce30701a954e0e721821df483ef7223c/tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8", size = 107964, upload-time = "2025-10-08T22:01:36.057Z" }, + { url = 
"https://files.pythonhosted.org/packages/54/78/5c46fff6432a712af9f792944f4fcd7067d8823157949f4e40c56b8b3c83/tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77", size = 163065, upload-time = "2025-10-08T22:01:37.27Z" }, + { url = "https://files.pythonhosted.org/packages/39/67/f85d9bd23182f45eca8939cd2bc7050e1f90c41f4a2ecbbd5963a1d1c486/tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf", size = 159088, upload-time = "2025-10-08T22:01:38.235Z" }, + { url = "https://files.pythonhosted.org/packages/26/5a/4b546a0405b9cc0659b399f12b6adb750757baf04250b148d3c5059fc4eb/tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530", size = 268193, upload-time = "2025-10-08T22:01:39.712Z" }, + { url = "https://files.pythonhosted.org/packages/42/4f/2c12a72ae22cf7b59a7fe75b3465b7aba40ea9145d026ba41cb382075b0e/tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b", size = 275488, upload-time = "2025-10-08T22:01:40.773Z" }, + { url = "https://files.pythonhosted.org/packages/92/04/a038d65dbe160c3aa5a624e93ad98111090f6804027d474ba9c37c8ae186/tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67", size = 272669, upload-time = "2025-10-08T22:01:41.824Z" }, + { url = "https://files.pythonhosted.org/packages/be/2f/8b7c60a9d1612a7cbc39ffcca4f21a73bf368a80fc25bccf8253e2563267/tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f", size = 279709, upload-time = "2025-10-08T22:01:43.177Z" }, + { url = "https://files.pythonhosted.org/packages/7e/46/cc36c679f09f27ded940281c38607716c86cf8ba4a518d524e349c8b4874/tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0", size = 107563, upload-time = "2025-10-08T22:01:44.233Z" }, + { url = "https://files.pythonhosted.org/packages/84/ff/426ca8683cf7b753614480484f6437f568fd2fda2edbdf57a2d3d8b27a0b/tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba", size = 119756, upload-time = "2025-10-08T22:01:45.234Z" }, + { url = "https://files.pythonhosted.org/packages/77/b8/0135fadc89e73be292b473cb820b4f5a08197779206b33191e801feeae40/tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b", size = 14408, upload-time = "2025-10-08T22:01:46.04Z" }, +] + +[[package]] +name = "tomli-w" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/19/75/241269d1da26b624c0d5e110e8149093c759b7a286138f4efd61a60e75fe/tomli_w-1.2.0.tar.gz", hash = "sha256:2dd14fac5a47c27be9cd4c976af5a12d87fb1f0b4512f81d69cce3b35ae25021", size = 7184, upload-time = "2025-01-15T12:07:24.262Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/18/c86eb8e0202e32dd3df50d43d7ff9854f8e0603945ff398974c1d91ac1ef/tomli_w-1.2.0-py3-none-any.whl", hash = "sha256:188306098d013b691fcadc011abd66727d3c414c571bb01b1a174ba8c983cf90", size = 6675, upload-time = "2025-01-15T12:07:22.074Z" }, +] + +[[package]] +name = "tox" 
+version = "4.23.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "chardet" }, + { name = "colorama" }, + { name = "filelock" }, + { name = "packaging" }, + { name = "platformdirs" }, + { name = "pluggy" }, + { name = "pyproject-api" }, + { name = "virtualenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1f/86/32b10f91b4b975a37ac402b0f9fa016775088e0565c93602ba0b3c729ce8/tox-4.23.2.tar.gz", hash = "sha256:86075e00e555df6e82e74cfc333917f91ecb47ffbc868dcafbd2672e332f4a2c", size = 189998, upload-time = "2024-10-22T14:29:04.46Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/c0/124b73d01c120e917383bc6c53ebc34efdf7243faa9fca64d105c94cf2ab/tox-4.23.2-py3-none-any.whl", hash = "sha256:452bc32bb031f2282881a2118923176445bac783ab97c874b8770ab4c3b76c38", size = 166758, upload-time = "2024-10-22T14:29:02.087Z" }, +] + +[[package]] +name = "typeguard" +version = "4.4.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c7/68/71c1a15b5f65f40e91b65da23b8224dad41349894535a97f63a52e462196/typeguard-4.4.4.tar.gz", hash = "sha256:3a7fd2dffb705d4d0efaed4306a704c89b9dee850b688f060a8b1615a79e5f74", size = 75203, upload-time = "2025-06-18T09:56:07.624Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1b/a9/e3aee762739c1d7528da1c3e06d518503f8b6c439c35549b53735ba52ead/typeguard-4.4.4-py3-none-any.whl", hash = "sha256:b5f562281b6bfa1f5492470464730ef001646128b180769880468bd84b68b09e", size = 34874, upload-time = "2025-06-18T09:56:05.999Z" }, +] + +[[package]] +name = "types-aiofiles" +version = "25.1.0.20251011" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/84/6c/6d23908a8217e36704aa9c79d99a620f2fdd388b66a4b7f72fbc6b6ff6c6/types_aiofiles-25.1.0.20251011.tar.gz", hash = "sha256:1c2b8ab260cb3cd40c15f9d10efdc05a6e1e6b02899304d80dfa0410e028d3ff", size = 14535, upload-time = "2025-10-11T02:44:51.237Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/71/0f/76917bab27e270bb6c32addd5968d69e558e5b6f7fb4ac4cbfa282996a96/types_aiofiles-25.1.0.20251011-py3-none-any.whl", hash = "sha256:8ff8de7f9d42739d8f0dadcceeb781ce27cd8d8c4152d4a7c52f6b20edb8149c", size = 14338, upload-time = "2025-10-11T02:44:50.054Z" }, +] + +[[package]] +name = "types-networkx" +version = "3.6.1.20251220" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/07/e3/dcc20d645dc0631b0df263959b8dde49dc47ad3c0537d8958bfefe692380/types_networkx-3.6.1.20251220.tar.gz", hash = "sha256:caf95e0d7777b969e50ceeb2c430d9d4dfe6b7bdee43c42dc9879a2d4408a790", size = 73500, upload-time = "2025-12-20T03:07:47.933Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/65/e7/fe40cfe7ba384d1f46fee835eb7727a4ee2fd80021a69add9553197b69a1/types_networkx-3.6.1.20251220-py3-none-any.whl", hash = "sha256:417ccbe7841f335a4c2b8e7515c3bc97a00fb5f686f399a763ef64392b209eac", size = 162715, upload-time = "2025-12-20T03:07:46.882Z" }, +] + +[[package]] +name = "types-pillow" +version = "10.2.0.20240822" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/4a/4495264dddaa600d65d68bcedb64dcccf9d9da61adff51f7d2ffd8e4c9ce/types-Pillow-10.2.0.20240822.tar.gz", hash = 
"sha256:559fb52a2ef991c326e4a0d20accb3bb63a7ba8d40eb493e0ecb0310ba52f0d3", size = 35389, upload-time = "2024-08-22T02:32:48.15Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/66/23/e81a5354859831fcf54d488d33b80ba6133ea84f874a9c0ec40a4881e133/types_Pillow-10.2.0.20240822-py3-none-any.whl", hash = "sha256:d9dab025aba07aeb12fd50a6799d4eac52a9603488eca09d7662543983f16c5d", size = 54354, upload-time = "2024-08-22T02:32:46.664Z" }, +] + +[[package]] +name = "types-pytz" +version = "2025.2.0.20251108" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/40/ff/c047ddc68c803b46470a357454ef76f4acd8c1088f5cc4891cdd909bfcf6/types_pytz-2025.2.0.20251108.tar.gz", hash = "sha256:fca87917836ae843f07129567b74c1929f1870610681b4c92cb86a3df5817bdb", size = 10961, upload-time = "2025-11-08T02:55:57.001Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl", hash = "sha256:0f1c9792cab4eb0e46c52f8845c8f77cf1e313cb3d68bf826aa867fe4717d91c", size = 10116, upload-time = "2025-11-08T02:55:56.194Z" }, +] + +[[package]] +name = "types-pyyaml" +version = "6.0.12.20250915" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/69/3c51b36d04da19b92f9e815be12753125bd8bc247ba0470a982e6979e71c/types_pyyaml-6.0.12.20250915.tar.gz", hash = "sha256:0f8b54a528c303f0e6f7165687dd33fafa81c807fcac23f632b63aa624ced1d3", size = 17522, upload-time = "2025-09-15T03:01:00.728Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl", hash = "sha256:e7d4d9e064e89a3b3cae120b4990cd370874d2bf12fa5f46c97018dd5d3c9ab6", size = 20338, upload-time = "2025-09-15T03:00:59.218Z" }, +] + +[[package]] +name = "types-requests" +version = "2.32.4.20250913" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/36/27/489922f4505975b11de2b5ad07b4fe1dca0bca9be81a703f26c5f3acfce5/types_requests-2.32.4.20250913.tar.gz", hash = "sha256:abd6d4f9ce3a9383f269775a9835a4c24e5cd6b9f647d64f88aa4613c33def5d", size = 23113, upload-time = "2025-09-13T02:40:02.309Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/20/9a227ea57c1285986c4cf78400d0a91615d25b24e257fd9e2969606bdfae/types_requests-2.32.4.20250913-py3-none-any.whl", hash = "sha256:78c9c1fffebbe0fa487a418e0fa5252017e9c60d1a2da394077f1780f655d7e1", size = 20658, upload-time = "2025-09-13T02:40:01.115Z" }, +] + +[[package]] +name = "types-setuptools" +version = "80.9.0.20251223" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/07/d1b605230730990de20477150191d6dccf6aecc037da94c9960a5d563bc8/types_setuptools-80.9.0.20251223.tar.gz", hash = "sha256:d3411059ae2f5f03985217d86ac6084efea2c9e9cacd5f0869ef950f308169b2", size = 42420, upload-time = "2025-12-23T03:18:26.752Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/5c/b8877da94012dbc6643e4eeca22bca9b99b295be05d161f8a403ae9387c0/types_setuptools-80.9.0.20251223-py3-none-any.whl", hash = "sha256:1b36db79d724c2287d83dc052cf887b47c0da6a2fff044378be0b019545f56e6", size = 64318, upload-time = "2025-12-23T03:18:25.868Z" }, +] + +[[package]] +name = "types-urllib3" +version = "1.26.25.14" +source = { 
registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/73/de/b9d7a68ad39092368fb21dd6194b362b98a1daeea5dcfef5e1adb5031c7e/types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f", size = 11239, upload-time = "2023-07-20T15:19:31.307Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/7b/3fc711b2efea5e85a7a0bbfe269ea944aa767bbba5ec52f9ee45d362ccf3/types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e", size = 15377, upload-time = "2023-07-20T15:19:30.379Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + +[[package]] +name = "ujson" +version = "5.11.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/d9/3f17e3c5773fb4941c68d9a37a47b1a79c9649d6c56aefbed87cc409d18a/ujson-5.11.0.tar.gz", hash = "sha256:e204ae6f909f099ba6b6b942131cee359ddda2b6e4ea39c12eb8b991fe2010e0", size = 7156583, upload-time = "2025-08-20T11:57:02.452Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b9/ef/a9cb1fce38f699123ff012161599fb9f2ff3f8d482b4b18c43a2dc35073f/ujson-5.11.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7895f0d2d53bd6aea11743bd56e3cb82d729980636cd0ed9b89418bf66591702", size = 55434, upload-time = "2025-08-20T11:55:34.987Z" }, + { url = "https://files.pythonhosted.org/packages/b1/05/dba51a00eb30bd947791b173766cbed3492269c150a7771d2750000c965f/ujson-5.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12b5e7e22a1fe01058000d1b317d3b65cc3daf61bd2ea7a2b76721fe160fa74d", size = 53190, upload-time = "2025-08-20T11:55:36.384Z" }, + { url = "https://files.pythonhosted.org/packages/03/3c/fd11a224f73fbffa299fb9644e425f38b38b30231f7923a088dd513aabb4/ujson-5.11.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0180a480a7d099082501cad1fe85252e4d4bf926b40960fb3d9e87a3a6fbbc80", size = 57600, upload-time = "2025-08-20T11:55:37.692Z" }, + { url = 
"https://files.pythonhosted.org/packages/55/b9/405103cae24899df688a3431c776e00528bd4799e7d68820e7ebcf824f92/ujson-5.11.0-cp312-cp312-manylinux_2_24_i686.manylinux_2_28_i686.whl", hash = "sha256:fa79fdb47701942c2132a9dd2297a1a85941d966d8c87bfd9e29b0cf423f26cc", size = 59791, upload-time = "2025-08-20T11:55:38.877Z" }, + { url = "https://files.pythonhosted.org/packages/17/7b/2dcbc2bbfdbf68f2368fb21ab0f6735e872290bb604c75f6e06b81edcb3f/ujson-5.11.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8254e858437c00f17cb72e7a644fc42dad0ebb21ea981b71df6e84b1072aaa7c", size = 57356, upload-time = "2025-08-20T11:55:40.036Z" }, + { url = "https://files.pythonhosted.org/packages/d1/71/fea2ca18986a366c750767b694430d5ded6b20b6985fddca72f74af38a4c/ujson-5.11.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1aa8a2ab482f09f6c10fba37112af5f957689a79ea598399c85009f2f29898b5", size = 1036313, upload-time = "2025-08-20T11:55:41.408Z" }, + { url = "https://files.pythonhosted.org/packages/a3/bb/d4220bd7532eac6288d8115db51710fa2d7d271250797b0bfba9f1e755af/ujson-5.11.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a638425d3c6eed0318df663df44480f4a40dc87cc7c6da44d221418312f6413b", size = 1195782, upload-time = "2025-08-20T11:55:43.357Z" }, + { url = "https://files.pythonhosted.org/packages/80/47/226e540aa38878ce1194454385701d82df538ccb5ff8db2cf1641dde849a/ujson-5.11.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7e3cff632c1d78023b15f7e3a81c3745cd3f94c044d1e8fa8efbd6b161997bbc", size = 1088817, upload-time = "2025-08-20T11:55:45.262Z" }, + { url = "https://files.pythonhosted.org/packages/7e/81/546042f0b23c9040d61d46ea5ca76f0cc5e0d399180ddfb2ae976ebff5b5/ujson-5.11.0-cp312-cp312-win32.whl", hash = "sha256:be6b0eaf92cae8cdee4d4c9e074bde43ef1c590ed5ba037ea26c9632fb479c88", size = 39757, upload-time = "2025-08-20T11:55:46.522Z" }, + { url = "https://files.pythonhosted.org/packages/44/1b/27c05dc8c9728f44875d74b5bfa948ce91f6c33349232619279f35c6e817/ujson-5.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:b7b136cc6abc7619124fd897ef75f8e63105298b5ca9bdf43ebd0e1fa0ee105f", size = 43859, upload-time = "2025-08-20T11:55:47.987Z" }, + { url = "https://files.pythonhosted.org/packages/22/2d/37b6557c97c3409c202c838aa9c960ca3896843b4295c4b7bb2bbd260664/ujson-5.11.0-cp312-cp312-win_arm64.whl", hash = "sha256:6cd2df62f24c506a0ba322d5e4fe4466d47a9467b57e881ee15a31f7ecf68ff6", size = 38361, upload-time = "2025-08-20T11:55:49.122Z" }, + { url = "https://files.pythonhosted.org/packages/1c/ec/2de9dd371d52c377abc05d2b725645326c4562fc87296a8907c7bcdf2db7/ujson-5.11.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:109f59885041b14ee9569bf0bb3f98579c3fa0652317b355669939e5fc5ede53", size = 55435, upload-time = "2025-08-20T11:55:50.243Z" }, + { url = "https://files.pythonhosted.org/packages/5b/a4/f611f816eac3a581d8a4372f6967c3ed41eddbae4008d1d77f223f1a4e0a/ujson-5.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a31c6b8004438e8c20fc55ac1c0e07dad42941db24176fe9acf2815971f8e752", size = 53193, upload-time = "2025-08-20T11:55:51.373Z" }, + { url = "https://files.pythonhosted.org/packages/e9/c5/c161940967184de96f5cbbbcce45b562a4bf851d60f4c677704b1770136d/ujson-5.11.0-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78c684fb21255b9b90320ba7e199780f653e03f6c2528663768965f4126a5b50", size = 57603, upload-time = "2025-08-20T11:55:52.583Z" }, + { url = 
"https://files.pythonhosted.org/packages/2b/d6/c7b2444238f5b2e2d0e3dab300b9ddc3606e4b1f0e4bed5a48157cebc792/ujson-5.11.0-cp313-cp313-manylinux_2_24_i686.manylinux_2_28_i686.whl", hash = "sha256:4c9f5d6a27d035dd90a146f7761c2272cf7103de5127c9ab9c4cd39ea61e878a", size = 59794, upload-time = "2025-08-20T11:55:53.69Z" }, + { url = "https://files.pythonhosted.org/packages/fe/a3/292551f936d3d02d9af148f53e1bc04306b00a7cf1fcbb86fa0d1c887242/ujson-5.11.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:837da4d27fed5fdc1b630bd18f519744b23a0b5ada1bbde1a36ba463f2900c03", size = 57363, upload-time = "2025-08-20T11:55:54.843Z" }, + { url = "https://files.pythonhosted.org/packages/90/a6/82cfa70448831b1a9e73f882225980b5c689bf539ec6400b31656a60ea46/ujson-5.11.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:787aff4a84da301b7f3bac09bc696e2e5670df829c6f8ecf39916b4e7e24e701", size = 1036311, upload-time = "2025-08-20T11:55:56.197Z" }, + { url = "https://files.pythonhosted.org/packages/84/5c/96e2266be50f21e9b27acaee8ca8f23ea0b85cb998c33d4f53147687839b/ujson-5.11.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6dd703c3e86dc6f7044c5ac0b3ae079ed96bf297974598116aa5fb7f655c3a60", size = 1195783, upload-time = "2025-08-20T11:55:58.081Z" }, + { url = "https://files.pythonhosted.org/packages/8d/20/78abe3d808cf3bb3e76f71fca46cd208317bf461c905d79f0d26b9df20f1/ujson-5.11.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3772e4fe6b0c1e025ba3c50841a0ca4786825a4894c8411bf8d3afe3a8061328", size = 1088822, upload-time = "2025-08-20T11:55:59.469Z" }, + { url = "https://files.pythonhosted.org/packages/d8/50/8856e24bec5e2fc7f775d867aeb7a3f137359356200ac44658f1f2c834b2/ujson-5.11.0-cp313-cp313-win32.whl", hash = "sha256:8fa2af7c1459204b7a42e98263b069bd535ea0cd978b4d6982f35af5a04a4241", size = 39753, upload-time = "2025-08-20T11:56:01.345Z" }, + { url = "https://files.pythonhosted.org/packages/5b/d8/1baee0f4179a4d0f5ce086832147b6cc9b7731c24ca08e14a3fdb8d39c32/ujson-5.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:34032aeca4510a7c7102bd5933f59a37f63891f30a0706fb46487ab6f0edf8f0", size = 43866, upload-time = "2025-08-20T11:56:02.552Z" }, + { url = "https://files.pythonhosted.org/packages/a9/8c/6d85ef5be82c6d66adced3ec5ef23353ed710a11f70b0b6a836878396334/ujson-5.11.0-cp313-cp313-win_arm64.whl", hash = "sha256:ce076f2df2e1aa62b685086fbad67f2b1d3048369664b4cdccc50707325401f9", size = 38363, upload-time = "2025-08-20T11:56:03.688Z" }, + { url = "https://files.pythonhosted.org/packages/28/08/4518146f4984d112764b1dfa6fb7bad691c44a401adadaa5e23ccd930053/ujson-5.11.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:65724738c73645db88f70ba1f2e6fb678f913281804d5da2fd02c8c5839af302", size = 55462, upload-time = "2025-08-20T11:56:04.873Z" }, + { url = "https://files.pythonhosted.org/packages/29/37/2107b9a62168867a692654d8766b81bd2fd1e1ba13e2ec90555861e02b0c/ujson-5.11.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:29113c003ca33ab71b1b480bde952fbab2a0b6b03a4ee4c3d71687cdcbd1a29d", size = 53246, upload-time = "2025-08-20T11:56:06.054Z" }, + { url = "https://files.pythonhosted.org/packages/9b/f8/25583c70f83788edbe3ca62ce6c1b79eff465d78dec5eb2b2b56b3e98b33/ujson-5.11.0-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c44c703842024d796b4c78542a6fcd5c3cb948b9fc2a73ee65b9c86a22ee3638", size = 57631, upload-time = "2025-08-20T11:56:07.374Z" }, + { url = 
"https://files.pythonhosted.org/packages/ed/ca/19b3a632933a09d696f10dc1b0dfa1d692e65ad507d12340116ce4f67967/ujson-5.11.0-cp314-cp314-manylinux_2_24_i686.manylinux_2_28_i686.whl", hash = "sha256:e750c436fb90edf85585f5c62a35b35082502383840962c6983403d1bd96a02c", size = 59877, upload-time = "2025-08-20T11:56:08.534Z" }, + { url = "https://files.pythonhosted.org/packages/55/7a/4572af5324ad4b2bfdd2321e898a527050290147b4ea337a79a0e4e87ec7/ujson-5.11.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f278b31a7c52eb0947b2db55a5133fbc46b6f0ef49972cd1a80843b72e135aba", size = 57363, upload-time = "2025-08-20T11:56:09.758Z" }, + { url = "https://files.pythonhosted.org/packages/7b/71/a2b8c19cf4e1efe53cf439cdf7198ac60ae15471d2f1040b490c1f0f831f/ujson-5.11.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ab2cb8351d976e788669c8281465d44d4e94413718af497b4e7342d7b2f78018", size = 1036394, upload-time = "2025-08-20T11:56:11.168Z" }, + { url = "https://files.pythonhosted.org/packages/7a/3e/7b98668cba3bb3735929c31b999b374ebc02c19dfa98dfebaeeb5c8597ca/ujson-5.11.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:090b4d11b380ae25453100b722d0609d5051ffe98f80ec52853ccf8249dfd840", size = 1195837, upload-time = "2025-08-20T11:56:12.6Z" }, + { url = "https://files.pythonhosted.org/packages/a1/ea/8870f208c20b43571a5c409ebb2fe9b9dba5f494e9e60f9314ac01ea8f78/ujson-5.11.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:80017e870d882d5517d28995b62e4e518a894f932f1e242cbc802a2fd64d365c", size = 1088837, upload-time = "2025-08-20T11:56:14.15Z" }, + { url = "https://files.pythonhosted.org/packages/63/b6/c0e6607e37fa47929920a685a968c6b990a802dec65e9c5181e97845985d/ujson-5.11.0-cp314-cp314-win32.whl", hash = "sha256:1d663b96eb34c93392e9caae19c099ec4133ba21654b081956613327f0e973ac", size = 41022, upload-time = "2025-08-20T11:56:15.509Z" }, + { url = "https://files.pythonhosted.org/packages/4e/56/f4fe86b4c9000affd63e9219e59b222dc48b01c534533093e798bf617a7e/ujson-5.11.0-cp314-cp314-win_amd64.whl", hash = "sha256:849e65b696f0d242833f1df4182096cedc50d414215d1371fca85c541fbff629", size = 45111, upload-time = "2025-08-20T11:56:16.597Z" }, + { url = "https://files.pythonhosted.org/packages/0a/f3/669437f0280308db4783b12a6d88c00730b394327d8334cc7a32ef218e64/ujson-5.11.0-cp314-cp314-win_arm64.whl", hash = "sha256:e73df8648c9470af2b6a6bf5250d4744ad2cf3d774dcf8c6e31f018bdd04d764", size = 39682, upload-time = "2025-08-20T11:56:17.763Z" }, + { url = "https://files.pythonhosted.org/packages/6e/cd/e9809b064a89fe5c4184649adeb13c1b98652db3f8518980b04227358574/ujson-5.11.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:de6e88f62796372fba1de973c11138f197d3e0e1d80bcb2b8aae1e826096d433", size = 55759, upload-time = "2025-08-20T11:56:18.882Z" }, + { url = "https://files.pythonhosted.org/packages/1b/be/ae26a6321179ebbb3a2e2685b9007c71bcda41ad7a77bbbe164005e956fc/ujson-5.11.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:49e56ef8066f11b80d620985ae36869a3ff7e4b74c3b6129182ec5d1df0255f3", size = 53634, upload-time = "2025-08-20T11:56:20.012Z" }, + { url = "https://files.pythonhosted.org/packages/ae/e9/fb4a220ee6939db099f4cfeeae796ecb91e7584ad4d445d4ca7f994a9135/ujson-5.11.0-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1a325fd2c3a056cf6c8e023f74a0c478dd282a93141356ae7f16d5309f5ff823", size = 58547, upload-time = "2025-08-20T11:56:21.175Z" }, + { url = 
"https://files.pythonhosted.org/packages/bd/f8/fc4b952b8f5fea09ea3397a0bd0ad019e474b204cabcb947cead5d4d1ffc/ujson-5.11.0-cp314-cp314t-manylinux_2_24_i686.manylinux_2_28_i686.whl", hash = "sha256:a0af6574fc1d9d53f4ff371f58c96673e6d988ed2b5bf666a6143c782fa007e9", size = 60489, upload-time = "2025-08-20T11:56:22.342Z" }, + { url = "https://files.pythonhosted.org/packages/2e/e5/af5491dfda4f8b77e24cf3da68ee0d1552f99a13e5c622f4cef1380925c3/ujson-5.11.0-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10f29e71ecf4ecd93a6610bd8efa8e7b6467454a363c3d6416db65de883eb076", size = 58035, upload-time = "2025-08-20T11:56:23.92Z" }, + { url = "https://files.pythonhosted.org/packages/c4/09/0945349dd41f25cc8c38d78ace49f14c5052c5bbb7257d2f466fa7bdb533/ujson-5.11.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1a0a9b76a89827a592656fe12e000cf4f12da9692f51a841a4a07aa4c7ecc41c", size = 1037212, upload-time = "2025-08-20T11:56:25.274Z" }, + { url = "https://files.pythonhosted.org/packages/49/44/8e04496acb3d5a1cbee3a54828d9652f67a37523efa3d3b18a347339680a/ujson-5.11.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b16930f6a0753cdc7d637b33b4e8f10d5e351e1fb83872ba6375f1e87be39746", size = 1196500, upload-time = "2025-08-20T11:56:27.517Z" }, + { url = "https://files.pythonhosted.org/packages/64/ae/4bc825860d679a0f208a19af2f39206dfd804ace2403330fdc3170334a2f/ujson-5.11.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:04c41afc195fd477a59db3a84d5b83a871bd648ef371cf8c6f43072d89144eef", size = 1089487, upload-time = "2025-08-20T11:56:29.07Z" }, + { url = "https://files.pythonhosted.org/packages/30/ed/5a057199fb0a5deabe0957073a1c1c1c02a3e99476cd03daee98ea21fa57/ujson-5.11.0-cp314-cp314t-win32.whl", hash = "sha256:aa6d7a5e09217ff93234e050e3e380da62b084e26b9f2e277d2606406a2fc2e5", size = 41859, upload-time = "2025-08-20T11:56:30.495Z" }, + { url = "https://files.pythonhosted.org/packages/aa/03/b19c6176bdf1dc13ed84b886e99677a52764861b6cc023d5e7b6ebda249d/ujson-5.11.0-cp314-cp314t-win_amd64.whl", hash = "sha256:48055e1061c1bb1f79e75b4ac39e821f3f35a9b82de17fce92c3140149009bec", size = 46183, upload-time = "2025-08-20T11:56:31.574Z" }, + { url = "https://files.pythonhosted.org/packages/5d/ca/a0413a3874b2dc1708b8796ca895bf363292f9c70b2e8ca482b7dbc0259d/ujson-5.11.0-cp314-cp314t-win_arm64.whl", hash = "sha256:1194b943e951092db611011cb8dbdb6cf94a3b816ed07906e14d3bc6ce0e90ab", size = 40264, upload-time = "2025-08-20T11:56:32.773Z" }, +] + +[[package]] +name = "urllib3" +version = "2.6.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1e/24/a2a2ed9addd907787d7aa0355ba36a6cadf1768b934c652ea78acbd59dcd/urllib3-2.6.2.tar.gz", hash = "sha256:016f9c98bb7e98085cb2b4b17b87d2c702975664e4f060c6532e64d1c1a5e797", size = 432930, upload-time = "2025-12-11T15:56:40.252Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/b9/4095b668ea3678bf6a0af005527f39de12fb026516fb3df17495a733b7f8/urllib3-2.6.2-py3-none-any.whl", hash = "sha256:ec21cddfe7724fc7cb4ba4bea7aa8e2ef36f607a4bab81aa6ce42a13dc3f03dd", size = 131182, upload-time = "2025-12-11T15:56:38.584Z" }, +] + +[[package]] +name = "virtualenv" +version = "20.35.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "distlib" }, + { name = "filelock" }, + { name = "platformdirs" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/20/28/e6f1a6f655d620846bd9df527390ecc26b3805a0c5989048c210e22c5ca9/virtualenv-20.35.4.tar.gz", hash = 
"sha256:643d3914d73d3eeb0c552cbb12d7e82adf0e504dbf86a3182f8771a153a1971c", size = 6028799, upload-time = "2025-10-29T06:57:40.511Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/0c/c05523fa3181fdf0c9c52a6ba91a23fbf3246cc095f26f6516f9c60e6771/virtualenv-20.35.4-py3-none-any.whl", hash = "sha256:c21c9cede36c9753eeade68ba7d523529f228a403463376cf821eaae2b650f1b", size = 6005095, upload-time = "2025-10-29T06:57:37.598Z" }, +] + +[[package]] +name = "watchdog" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/ea/3930d07dafc9e286ed356a679aa02d777c06e9bfd1164fa7c19c288a5483/watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948", size = 96471, upload-time = "2024-11-01T14:06:37.745Z" }, + { url = "https://files.pythonhosted.org/packages/12/87/48361531f70b1f87928b045df868a9fd4e253d9ae087fa4cf3f7113be363/watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860", size = 88449, upload-time = "2024-11-01T14:06:39.748Z" }, + { url = "https://files.pythonhosted.org/packages/5b/7e/8f322f5e600812e6f9a31b75d242631068ca8f4ef0582dd3ae6e72daecc8/watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0", size = 89054, upload-time = "2024-11-01T14:06:41.009Z" }, + { url = "https://files.pythonhosted.org/packages/68/98/b0345cabdce2041a01293ba483333582891a3bd5769b08eceb0d406056ef/watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c", size = 96480, upload-time = "2024-11-01T14:06:42.952Z" }, + { url = "https://files.pythonhosted.org/packages/85/83/cdf13902c626b28eedef7ec4f10745c52aad8a8fe7eb04ed7b1f111ca20e/watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134", size = 88451, upload-time = "2024-11-01T14:06:45.084Z" }, + { url = "https://files.pythonhosted.org/packages/fe/c4/225c87bae08c8b9ec99030cd48ae9c4eca050a59bf5c2255853e18c87b50/watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b", size = 89057, upload-time = "2024-11-01T14:06:47.324Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" }, + { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" }, + { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = 
"sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" }, + { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" }, + { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" }, + { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" }, + { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" }, + { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" }, + { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" }, + { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" }, +] + +[[package]] +name = "wcmatch" +version = "10.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bracex" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/79/3e/c0bdc27cf06f4e47680bd5803a07cb3dfd17de84cde92dd217dcb9e05253/wcmatch-10.1.tar.gz", hash = "sha256:f11f94208c8c8484a16f4f48638a85d771d9513f4ab3f37595978801cb9465af", size = 117421, upload-time = "2025-06-22T19:14:02.49Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/d8/0d1d2e9d3fabcf5d6840362adcf05f8cf3cd06a73358140c3a97189238ae/wcmatch-10.1-py3-none-any.whl", hash = "sha256:5848ace7dbb0476e5e55ab63c6bbd529745089343427caa5537f230cc01beb8a", size = 39854, upload-time = "2025-06-22T19:14:00.978Z" }, +] + +[[package]] +name = "wcwidth" +version = "0.2.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/30/6b0809f4510673dc723187aeaf24c7f5459922d01e2f794277a3dfb90345/wcwidth-0.2.14.tar.gz", hash = "sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605", size = 102293, upload-time = "2025-09-22T16:29:53.023Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" }, +] + +[[package]] +name = "win32-setctime" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b3/8f/705086c9d734d3b663af0e9bb3d4de6578d08f46b1b101c2442fd9aecaa2/win32_setctime-1.2.0.tar.gz", hash = "sha256:ae1fdf948f5640aae05c511ade119313fb6a30d7eabe25fef9764dca5873c4c0", size = 4867, upload-time = "2024-12-07T15:28:28.314Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e1/07/c6fe3ad3e685340704d314d765b7912993bcb8dc198f0e7a89382d37974b/win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390", size = 4083, upload-time = "2024-12-07T15:28:26.465Z" }, +] + +[[package]] +name = "wrapt" +version = "1.17.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/8f/aeb76c5b46e273670962298c23e7ddde79916cb74db802131d49a85e4b7d/wrapt-1.17.3.tar.gz", hash = "sha256:f66eb08feaa410fe4eebd17f2a2c8e2e46d3476e9f8c783daa8e09e0faa666d0", size = 55547, upload-time = "2025-08-12T05:53:21.714Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9f/41/cad1aba93e752f1f9268c77270da3c469883d56e2798e7df6240dcb2287b/wrapt-1.17.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ab232e7fdb44cdfbf55fc3afa31bcdb0d8980b9b95c38b6405df2acb672af0e0", size = 53998, upload-time = "2025-08-12T05:51:47.138Z" }, + { url = "https://files.pythonhosted.org/packages/60/f8/096a7cc13097a1869fe44efe68dace40d2a16ecb853141394047f0780b96/wrapt-1.17.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9baa544e6acc91130e926e8c802a17f3b16fbea0fd441b5a60f5cf2cc5c3deba", size = 39020, upload-time = "2025-08-12T05:51:35.906Z" }, + { url = "https://files.pythonhosted.org/packages/33/df/bdf864b8997aab4febb96a9ae5c124f700a5abd9b5e13d2a3214ec4be705/wrapt-1.17.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6b538e31eca1a7ea4605e44f81a48aa24c4632a277431a6ed3f328835901f4fd", size = 39098, upload-time = "2025-08-12T05:51:57.474Z" }, + { url = "https://files.pythonhosted.org/packages/9f/81/5d931d78d0eb732b95dc3ddaeeb71c8bb572fb01356e9133916cd729ecdd/wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:042ec3bb8f319c147b1301f2393bc19dba6e176b7da446853406d041c36c7828", size = 88036, upload-time = "2025-08-12T05:52:34.784Z" }, + { url = "https://files.pythonhosted.org/packages/ca/38/2e1785df03b3d72d34fc6252d91d9d12dc27a5c89caef3335a1bbb8908ca/wrapt-1.17.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3af60380ba0b7b5aeb329bc4e402acd25bd877e98b3727b0135cb5c2efdaefe9", size = 88156, upload-time = "2025-08-12T05:52:13.599Z" }, + { url = "https://files.pythonhosted.org/packages/b3/8b/48cdb60fe0603e34e05cffda0b2a4adab81fd43718e11111a4b0100fd7c1/wrapt-1.17.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0b02e424deef65c9f7326d8c19220a2c9040c51dc165cddb732f16198c168396", size = 87102, upload-time = "2025-08-12T05:52:14.56Z" }, + { url = "https://files.pythonhosted.org/packages/3c/51/d81abca783b58f40a154f1b2c56db1d2d9e0d04fa2d4224e357529f57a57/wrapt-1.17.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:74afa28374a3c3a11b3b5e5fca0ae03bef8450d6aa3ab3a1e2c30e3a75d023dc", size = 87732, upload-time = "2025-08-12T05:52:36.165Z" }, + { url = "https://files.pythonhosted.org/packages/9e/b1/43b286ca1392a006d5336412d41663eeef1ad57485f3e52c767376ba7e5a/wrapt-1.17.3-cp312-cp312-win32.whl", hash = "sha256:4da9f45279fff3543c371d5ababc57a0384f70be244de7759c85a7f989cb4ebe", size = 36705, upload-time = "2025-08-12T05:53:07.123Z" }, + { url = "https://files.pythonhosted.org/packages/28/de/49493f962bd3c586ab4b88066e967aa2e0703d6ef2c43aa28cb83bf7b507/wrapt-1.17.3-cp312-cp312-win_amd64.whl", hash = "sha256:e71d5c6ebac14875668a1e90baf2ea0ef5b7ac7918355850c0908ae82bcb297c", size = 38877, upload-time = "2025-08-12T05:53:05.436Z" }, + { url = "https://files.pythonhosted.org/packages/f1/48/0f7102fe9cb1e8a5a77f80d4f0956d62d97034bbe88d33e94699f99d181d/wrapt-1.17.3-cp312-cp312-win_arm64.whl", hash = "sha256:604d076c55e2fdd4c1c03d06dc1a31b95130010517b5019db15365ec4a405fc6", size = 36885, upload-time = "2025-08-12T05:52:54.367Z" }, + { url = "https://files.pythonhosted.org/packages/fc/f6/759ece88472157acb55fc195e5b116e06730f1b651b5b314c66291729193/wrapt-1.17.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a47681378a0439215912ef542c45a783484d4dd82bac412b71e59cf9c0e1cea0", size = 54003, upload-time = "2025-08-12T05:51:48.627Z" }, + { url = "https://files.pythonhosted.org/packages/4f/a9/49940b9dc6d47027dc850c116d79b4155f15c08547d04db0f07121499347/wrapt-1.17.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a30837587c6ee3cd1a4d1c2ec5d24e77984d44e2f34547e2323ddb4e22eb77", size = 39025, upload-time = "2025-08-12T05:51:37.156Z" }, + { url = "https://files.pythonhosted.org/packages/45/35/6a08de0f2c96dcdd7fe464d7420ddb9a7655a6561150e5fc4da9356aeaab/wrapt-1.17.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:16ecf15d6af39246fe33e507105d67e4b81d8f8d2c6598ff7e3ca1b8a37213f7", size = 39108, upload-time = "2025-08-12T05:51:58.425Z" }, + { url = "https://files.pythonhosted.org/packages/0c/37/6faf15cfa41bf1f3dba80cd3f5ccc6622dfccb660ab26ed79f0178c7497f/wrapt-1.17.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6fd1ad24dc235e4ab88cda009e19bf347aabb975e44fd5c2fb22a3f6e4141277", size = 88072, upload-time = "2025-08-12T05:52:37.53Z" }, + { url = "https://files.pythonhosted.org/packages/78/f2/efe19ada4a38e4e15b6dff39c3e3f3f73f5decf901f66e6f72fe79623a06/wrapt-1.17.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ed61b7c2d49cee3c027372df5809a59d60cf1b6c2f81ee980a091f3afed6a2d", size = 88214, upload-time = "2025-08-12T05:52:15.886Z" }, + { url = "https://files.pythonhosted.org/packages/40/90/ca86701e9de1622b16e09689fc24b76f69b06bb0150990f6f4e8b0eeb576/wrapt-1.17.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:423ed5420ad5f5529db9ce89eac09c8a2f97da18eb1c870237e84c5a5c2d60aa", size = 87105, upload-time = "2025-08-12T05:52:17.914Z" }, + { url = "https://files.pythonhosted.org/packages/fd/e0/d10bd257c9a3e15cbf5523025252cc14d77468e8ed644aafb2d6f54cb95d/wrapt-1.17.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e01375f275f010fcbf7f643b4279896d04e571889b8a5b3f848423d91bf07050", size = 87766, upload-time = "2025-08-12T05:52:39.243Z" }, + { url = "https://files.pythonhosted.org/packages/e8/cf/7d848740203c7b4b27eb55dbfede11aca974a51c3d894f6cc4b865f42f58/wrapt-1.17.3-cp313-cp313-win32.whl", hash = "sha256:53e5e39ff71b3fc484df8a522c933ea2b7cdd0d5d15ae82e5b23fde87d44cbd8", size = 36711, upload-time = 
"2025-08-12T05:53:10.074Z" }, + { url = "https://files.pythonhosted.org/packages/57/54/35a84d0a4d23ea675994104e667ceff49227ce473ba6a59ba2c84f250b74/wrapt-1.17.3-cp313-cp313-win_amd64.whl", hash = "sha256:1f0b2f40cf341ee8cc1a97d51ff50dddb9fcc73241b9143ec74b30fc4f44f6cb", size = 38885, upload-time = "2025-08-12T05:53:08.695Z" }, + { url = "https://files.pythonhosted.org/packages/01/77/66e54407c59d7b02a3c4e0af3783168fff8e5d61def52cda8728439d86bc/wrapt-1.17.3-cp313-cp313-win_arm64.whl", hash = "sha256:7425ac3c54430f5fc5e7b6f41d41e704db073309acfc09305816bc6a0b26bb16", size = 36896, upload-time = "2025-08-12T05:52:55.34Z" }, + { url = "https://files.pythonhosted.org/packages/02/a2/cd864b2a14f20d14f4c496fab97802001560f9f41554eef6df201cd7f76c/wrapt-1.17.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:cf30f6e3c077c8e6a9a7809c94551203c8843e74ba0c960f4a98cd80d4665d39", size = 54132, upload-time = "2025-08-12T05:51:49.864Z" }, + { url = "https://files.pythonhosted.org/packages/d5/46/d011725b0c89e853dc44cceb738a307cde5d240d023d6d40a82d1b4e1182/wrapt-1.17.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e228514a06843cae89621384cfe3a80418f3c04aadf8a3b14e46a7be704e4235", size = 39091, upload-time = "2025-08-12T05:51:38.935Z" }, + { url = "https://files.pythonhosted.org/packages/2e/9e/3ad852d77c35aae7ddebdbc3b6d35ec8013af7d7dddad0ad911f3d891dae/wrapt-1.17.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5ea5eb3c0c071862997d6f3e02af1d055f381b1d25b286b9d6644b79db77657c", size = 39172, upload-time = "2025-08-12T05:51:59.365Z" }, + { url = "https://files.pythonhosted.org/packages/c3/f7/c983d2762bcce2326c317c26a6a1e7016f7eb039c27cdf5c4e30f4160f31/wrapt-1.17.3-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:281262213373b6d5e4bb4353bc36d1ba4084e6d6b5d242863721ef2bf2c2930b", size = 87163, upload-time = "2025-08-12T05:52:40.965Z" }, + { url = "https://files.pythonhosted.org/packages/e4/0f/f673f75d489c7f22d17fe0193e84b41540d962f75fce579cf6873167c29b/wrapt-1.17.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dc4a8d2b25efb6681ecacad42fca8859f88092d8732b170de6a5dddd80a1c8fa", size = 87963, upload-time = "2025-08-12T05:52:20.326Z" }, + { url = "https://files.pythonhosted.org/packages/df/61/515ad6caca68995da2fac7a6af97faab8f78ebe3bf4f761e1b77efbc47b5/wrapt-1.17.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:373342dd05b1d07d752cecbec0c41817231f29f3a89aa8b8843f7b95992ed0c7", size = 86945, upload-time = "2025-08-12T05:52:21.581Z" }, + { url = "https://files.pythonhosted.org/packages/d3/bd/4e70162ce398462a467bc09e768bee112f1412e563620adc353de9055d33/wrapt-1.17.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d40770d7c0fd5cbed9d84b2c3f2e156431a12c9a37dc6284060fb4bec0b7ffd4", size = 86857, upload-time = "2025-08-12T05:52:43.043Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b8/da8560695e9284810b8d3df8a19396a6e40e7518059584a1a394a2b35e0a/wrapt-1.17.3-cp314-cp314-win32.whl", hash = "sha256:fbd3c8319de8e1dc79d346929cd71d523622da527cca14e0c1d257e31c2b8b10", size = 37178, upload-time = "2025-08-12T05:53:12.605Z" }, + { url = "https://files.pythonhosted.org/packages/db/c8/b71eeb192c440d67a5a0449aaee2310a1a1e8eca41676046f99ed2487e9f/wrapt-1.17.3-cp314-cp314-win_amd64.whl", hash = "sha256:e1a4120ae5705f673727d3253de3ed0e016f7cd78dc463db1b31e2463e1f3cf6", size = 39310, upload-time = "2025-08-12T05:53:11.106Z" }, + { url = 
"https://files.pythonhosted.org/packages/45/20/2cda20fd4865fa40f86f6c46ed37a2a8356a7a2fde0773269311f2af56c7/wrapt-1.17.3-cp314-cp314-win_arm64.whl", hash = "sha256:507553480670cab08a800b9463bdb881b2edeed77dc677b0a5915e6106e91a58", size = 37266, upload-time = "2025-08-12T05:52:56.531Z" }, + { url = "https://files.pythonhosted.org/packages/77/ed/dd5cf21aec36c80443c6f900449260b80e2a65cf963668eaef3b9accce36/wrapt-1.17.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ed7c635ae45cfbc1a7371f708727bf74690daedc49b4dba310590ca0bd28aa8a", size = 56544, upload-time = "2025-08-12T05:51:51.109Z" }, + { url = "https://files.pythonhosted.org/packages/8d/96/450c651cc753877ad100c7949ab4d2e2ecc4d97157e00fa8f45df682456a/wrapt-1.17.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:249f88ed15503f6492a71f01442abddd73856a0032ae860de6d75ca62eed8067", size = 40283, upload-time = "2025-08-12T05:51:39.912Z" }, + { url = "https://files.pythonhosted.org/packages/d1/86/2fcad95994d9b572db57632acb6f900695a648c3e063f2cd344b3f5c5a37/wrapt-1.17.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5a03a38adec8066d5a37bea22f2ba6bbf39fcdefbe2d91419ab864c3fb515454", size = 40366, upload-time = "2025-08-12T05:52:00.693Z" }, + { url = "https://files.pythonhosted.org/packages/64/0e/f4472f2fdde2d4617975144311f8800ef73677a159be7fe61fa50997d6c0/wrapt-1.17.3-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5d4478d72eb61c36e5b446e375bbc49ed002430d17cdec3cecb36993398e1a9e", size = 108571, upload-time = "2025-08-12T05:52:44.521Z" }, + { url = "https://files.pythonhosted.org/packages/cc/01/9b85a99996b0a97c8a17484684f206cbb6ba73c1ce6890ac668bcf3838fb/wrapt-1.17.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:223db574bb38637e8230eb14b185565023ab624474df94d2af18f1cdb625216f", size = 113094, upload-time = "2025-08-12T05:52:22.618Z" }, + { url = "https://files.pythonhosted.org/packages/25/02/78926c1efddcc7b3aa0bc3d6b33a822f7d898059f7cd9ace8c8318e559ef/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e405adefb53a435f01efa7ccdec012c016b5a1d3f35459990afc39b6be4d5056", size = 110659, upload-time = "2025-08-12T05:52:24.057Z" }, + { url = "https://files.pythonhosted.org/packages/dc/ee/c414501ad518ac3e6fe184753632fe5e5ecacdcf0effc23f31c1e4f7bfcf/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:88547535b787a6c9ce4086917b6e1d291aa8ed914fdd3a838b3539dc95c12804", size = 106946, upload-time = "2025-08-12T05:52:45.976Z" }, + { url = "https://files.pythonhosted.org/packages/be/44/a1bd64b723d13bb151d6cc91b986146a1952385e0392a78567e12149c7b4/wrapt-1.17.3-cp314-cp314t-win32.whl", hash = "sha256:41b1d2bc74c2cac6f9074df52b2efbef2b30bdfe5f40cb78f8ca22963bc62977", size = 38717, upload-time = "2025-08-12T05:53:15.214Z" }, + { url = "https://files.pythonhosted.org/packages/79/d9/7cfd5a312760ac4dd8bf0184a6ee9e43c33e47f3dadc303032ce012b8fa3/wrapt-1.17.3-cp314-cp314t-win_amd64.whl", hash = "sha256:73d496de46cd2cdbdbcce4ae4bcdb4afb6a11234a1df9c085249d55166b95116", size = 41334, upload-time = "2025-08-12T05:53:14.178Z" }, + { url = "https://files.pythonhosted.org/packages/46/78/10ad9781128ed2f99dbc474f43283b13fea8ba58723e98844367531c18e9/wrapt-1.17.3-cp314-cp314t-win_arm64.whl", hash = "sha256:f38e60678850c42461d4202739f9bf1e3a737c7ad283638251e79cc49effb6b6", size = 38471, upload-time = "2025-08-12T05:52:57.784Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/f6/a933bd70f98e9cf3e08167fc5cd7aaaca49147e48411c0bd5ae701bb2194/wrapt-1.17.3-py3-none-any.whl", hash = "sha256:7171ae35d2c33d326ac19dd8facb1e82e5fd04ef8c6c0e394d7af55a55051c22", size = 23591, upload-time = "2025-08-12T05:53:20.674Z" }, +] + +[[package]] +name = "yarl" +version = "1.22.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "multidict" }, + { name = "propcache" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/57/63/0c6ebca57330cd313f6102b16dd57ffaf3ec4c83403dcb45dbd15c6f3ea1/yarl-1.22.0.tar.gz", hash = "sha256:bebf8557577d4401ba8bd9ff33906f1376c877aa78d1fe216ad01b4d6745af71", size = 187169, upload-time = "2025-10-06T14:12:55.963Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/75/ff/46736024fee3429b80a165a732e38e5d5a238721e634ab41b040d49f8738/yarl-1.22.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e340382d1afa5d32b892b3ff062436d592ec3d692aeea3bef3a5cfe11bbf8c6f", size = 142000, upload-time = "2025-10-06T14:09:44.631Z" }, + { url = "https://files.pythonhosted.org/packages/5a/9a/b312ed670df903145598914770eb12de1bac44599549b3360acc96878df8/yarl-1.22.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f1e09112a2c31ffe8d80be1b0988fa6a18c5d5cad92a9ffbb1c04c91bfe52ad2", size = 94338, upload-time = "2025-10-06T14:09:46.372Z" }, + { url = "https://files.pythonhosted.org/packages/ba/f5/0601483296f09c3c65e303d60c070a5c19fcdbc72daa061e96170785bc7d/yarl-1.22.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:939fe60db294c786f6b7c2d2e121576628468f65453d86b0fe36cb52f987bd74", size = 94909, upload-time = "2025-10-06T14:09:48.648Z" }, + { url = "https://files.pythonhosted.org/packages/60/41/9a1fe0b73dbcefce72e46cf149b0e0a67612d60bfc90fb59c2b2efdfbd86/yarl-1.22.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e1651bf8e0398574646744c1885a41198eba53dc8a9312b954073f845c90a8df", size = 372940, upload-time = "2025-10-06T14:09:50.089Z" }, + { url = "https://files.pythonhosted.org/packages/17/7a/795cb6dfee561961c30b800f0ed616b923a2ec6258b5def2a00bf8231334/yarl-1.22.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b8a0588521a26bf92a57a1705b77b8b59044cdceccac7151bd8d229e66b8dedb", size = 345825, upload-time = "2025-10-06T14:09:52.142Z" }, + { url = "https://files.pythonhosted.org/packages/d7/93/a58f4d596d2be2ae7bab1a5846c4d270b894958845753b2c606d666744d3/yarl-1.22.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:42188e6a615c1a75bcaa6e150c3fe8f3e8680471a6b10150c5f7e83f47cc34d2", size = 386705, upload-time = "2025-10-06T14:09:54.128Z" }, + { url = "https://files.pythonhosted.org/packages/61/92/682279d0e099d0e14d7fd2e176bd04f48de1484f56546a3e1313cd6c8e7c/yarl-1.22.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f6d2cb59377d99718913ad9a151030d6f83ef420a2b8f521d94609ecc106ee82", size = 396518, upload-time = "2025-10-06T14:09:55.762Z" }, + { url = "https://files.pythonhosted.org/packages/db/0f/0d52c98b8a885aeda831224b78f3be7ec2e1aa4a62091f9f9188c3c65b56/yarl-1.22.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50678a3b71c751d58d7908edc96d332af328839eea883bb554a43f539101277a", size = 377267, upload-time = "2025-10-06T14:09:57.958Z" }, + { url = 
"https://files.pythonhosted.org/packages/22/42/d2685e35908cbeaa6532c1fc73e89e7f2efb5d8a7df3959ea8e37177c5a3/yarl-1.22.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e8fbaa7cec507aa24ea27a01456e8dd4b6fab829059b69844bd348f2d467124", size = 365797, upload-time = "2025-10-06T14:09:59.527Z" }, + { url = "https://files.pythonhosted.org/packages/a2/83/cf8c7bcc6355631762f7d8bdab920ad09b82efa6b722999dfb05afa6cfac/yarl-1.22.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:433885ab5431bc3d3d4f2f9bd15bfa1614c522b0f1405d62c4f926ccd69d04fa", size = 365535, upload-time = "2025-10-06T14:10:01.139Z" }, + { url = "https://files.pythonhosted.org/packages/25/e1/5302ff9b28f0c59cac913b91fe3f16c59a033887e57ce9ca5d41a3a94737/yarl-1.22.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:b790b39c7e9a4192dc2e201a282109ed2985a1ddbd5ac08dc56d0e121400a8f7", size = 382324, upload-time = "2025-10-06T14:10:02.756Z" }, + { url = "https://files.pythonhosted.org/packages/bf/cd/4617eb60f032f19ae3a688dc990d8f0d89ee0ea378b61cac81ede3e52fae/yarl-1.22.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:31f0b53913220599446872d757257be5898019c85e7971599065bc55065dc99d", size = 383803, upload-time = "2025-10-06T14:10:04.552Z" }, + { url = "https://files.pythonhosted.org/packages/59/65/afc6e62bb506a319ea67b694551dab4a7e6fb7bf604e9bd9f3e11d575fec/yarl-1.22.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a49370e8f711daec68d09b821a34e1167792ee2d24d405cbc2387be4f158b520", size = 374220, upload-time = "2025-10-06T14:10:06.489Z" }, + { url = "https://files.pythonhosted.org/packages/e7/3d/68bf18d50dc674b942daec86a9ba922d3113d8399b0e52b9897530442da2/yarl-1.22.0-cp312-cp312-win32.whl", hash = "sha256:70dfd4f241c04bd9239d53b17f11e6ab672b9f1420364af63e8531198e3f5fe8", size = 81589, upload-time = "2025-10-06T14:10:09.254Z" }, + { url = "https://files.pythonhosted.org/packages/c8/9a/6ad1a9b37c2f72874f93e691b2e7ecb6137fb2b899983125db4204e47575/yarl-1.22.0-cp312-cp312-win_amd64.whl", hash = "sha256:8884d8b332a5e9b88e23f60bb166890009429391864c685e17bd73a9eda9105c", size = 87213, upload-time = "2025-10-06T14:10:11.369Z" }, + { url = "https://files.pythonhosted.org/packages/44/c5/c21b562d1680a77634d748e30c653c3ca918beb35555cff24986fff54598/yarl-1.22.0-cp312-cp312-win_arm64.whl", hash = "sha256:ea70f61a47f3cc93bdf8b2f368ed359ef02a01ca6393916bc8ff877427181e74", size = 81330, upload-time = "2025-10-06T14:10:13.112Z" }, + { url = "https://files.pythonhosted.org/packages/ea/f3/d67de7260456ee105dc1d162d43a019ecad6b91e2f51809d6cddaa56690e/yarl-1.22.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8dee9c25c74997f6a750cd317b8ca63545169c098faee42c84aa5e506c819b53", size = 139980, upload-time = "2025-10-06T14:10:14.601Z" }, + { url = "https://files.pythonhosted.org/packages/01/88/04d98af0b47e0ef42597b9b28863b9060bb515524da0a65d5f4db160b2d5/yarl-1.22.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:01e73b85a5434f89fc4fe27dcda2aff08ddf35e4d47bbbea3bdcd25321af538a", size = 93424, upload-time = "2025-10-06T14:10:16.115Z" }, + { url = "https://files.pythonhosted.org/packages/18/91/3274b215fd8442a03975ce6bee5fe6aa57a8326b29b9d3d56234a1dca244/yarl-1.22.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:22965c2af250d20c873cdbee8ff958fb809940aeb2e74ba5f20aaf6b7ac8c70c", size = 93821, upload-time = "2025-10-06T14:10:17.993Z" }, + { url = 
"https://files.pythonhosted.org/packages/61/3a/caf4e25036db0f2da4ca22a353dfeb3c9d3c95d2761ebe9b14df8fc16eb0/yarl-1.22.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b4f15793aa49793ec8d1c708ab7f9eded1aa72edc5174cae703651555ed1b601", size = 373243, upload-time = "2025-10-06T14:10:19.44Z" }, + { url = "https://files.pythonhosted.org/packages/6e/9e/51a77ac7516e8e7803b06e01f74e78649c24ee1021eca3d6a739cb6ea49c/yarl-1.22.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5542339dcf2747135c5c85f68680353d5cb9ffd741c0f2e8d832d054d41f35a", size = 342361, upload-time = "2025-10-06T14:10:21.124Z" }, + { url = "https://files.pythonhosted.org/packages/d4/f8/33b92454789dde8407f156c00303e9a891f1f51a0330b0fad7c909f87692/yarl-1.22.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5c401e05ad47a75869c3ab3e35137f8468b846770587e70d71e11de797d113df", size = 387036, upload-time = "2025-10-06T14:10:22.902Z" }, + { url = "https://files.pythonhosted.org/packages/d9/9a/c5db84ea024f76838220280f732970aa4ee154015d7f5c1bfb60a267af6f/yarl-1.22.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:243dda95d901c733f5b59214d28b0120893d91777cb8aa043e6ef059d3cddfe2", size = 397671, upload-time = "2025-10-06T14:10:24.523Z" }, + { url = "https://files.pythonhosted.org/packages/11/c9/cd8538dc2e7727095e0c1d867bad1e40c98f37763e6d995c1939f5fdc7b1/yarl-1.22.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bec03d0d388060058f5d291a813f21c011041938a441c593374da6077fe21b1b", size = 377059, upload-time = "2025-10-06T14:10:26.406Z" }, + { url = "https://files.pythonhosted.org/packages/a1/b9/ab437b261702ced75122ed78a876a6dec0a1b0f5e17a4ac7a9a2482d8abe/yarl-1.22.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0748275abb8c1e1e09301ee3cf90c8a99678a4e92e4373705f2a2570d581273", size = 365356, upload-time = "2025-10-06T14:10:28.461Z" }, + { url = "https://files.pythonhosted.org/packages/b2/9d/8e1ae6d1d008a9567877b08f0ce4077a29974c04c062dabdb923ed98e6fe/yarl-1.22.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:47fdb18187e2a4e18fda2c25c05d8251a9e4a521edaed757fef033e7d8498d9a", size = 361331, upload-time = "2025-10-06T14:10:30.541Z" }, + { url = "https://files.pythonhosted.org/packages/ca/5a/09b7be3905962f145b73beb468cdd53db8aa171cf18c80400a54c5b82846/yarl-1.22.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c7044802eec4524fde550afc28edda0dd5784c4c45f0be151a2d3ba017daca7d", size = 382590, upload-time = "2025-10-06T14:10:33.352Z" }, + { url = "https://files.pythonhosted.org/packages/aa/7f/59ec509abf90eda5048b0bc3e2d7b5099dffdb3e6b127019895ab9d5ef44/yarl-1.22.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:139718f35149ff544caba20fce6e8a2f71f1e39b92c700d8438a0b1d2a631a02", size = 385316, upload-time = "2025-10-06T14:10:35.034Z" }, + { url = "https://files.pythonhosted.org/packages/e5/84/891158426bc8036bfdfd862fabd0e0fa25df4176ec793e447f4b85cf1be4/yarl-1.22.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e1b51bebd221006d3d2f95fbe124b22b247136647ae5dcc8c7acafba66e5ee67", size = 374431, upload-time = "2025-10-06T14:10:37.76Z" }, + { url = "https://files.pythonhosted.org/packages/bb/49/03da1580665baa8bef5e8ed34c6df2c2aca0a2f28bf397ed238cc1bbc6f2/yarl-1.22.0-cp313-cp313-win32.whl", hash = "sha256:d3e32536234a95f513bd374e93d717cf6b2231a791758de6c509e3653f234c95", size = 
81555, upload-time = "2025-10-06T14:10:39.649Z" }, + { url = "https://files.pythonhosted.org/packages/9a/ee/450914ae11b419eadd067c6183ae08381cfdfcb9798b90b2b713bbebddda/yarl-1.22.0-cp313-cp313-win_amd64.whl", hash = "sha256:47743b82b76d89a1d20b83e60d5c20314cbd5ba2befc9cda8f28300c4a08ed4d", size = 86965, upload-time = "2025-10-06T14:10:41.313Z" }, + { url = "https://files.pythonhosted.org/packages/98/4d/264a01eae03b6cf629ad69bae94e3b0e5344741e929073678e84bf7a3e3b/yarl-1.22.0-cp313-cp313-win_arm64.whl", hash = "sha256:5d0fcda9608875f7d052eff120c7a5da474a6796fe4d83e152e0e4d42f6d1a9b", size = 81205, upload-time = "2025-10-06T14:10:43.167Z" }, + { url = "https://files.pythonhosted.org/packages/88/fc/6908f062a2f77b5f9f6d69cecb1747260831ff206adcbc5b510aff88df91/yarl-1.22.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:719ae08b6972befcba4310e49edb1161a88cdd331e3a694b84466bd938a6ab10", size = 146209, upload-time = "2025-10-06T14:10:44.643Z" }, + { url = "https://files.pythonhosted.org/packages/65/47/76594ae8eab26210b4867be6f49129861ad33da1f1ebdf7051e98492bf62/yarl-1.22.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:47d8a5c446df1c4db9d21b49619ffdba90e77c89ec6e283f453856c74b50b9e3", size = 95966, upload-time = "2025-10-06T14:10:46.554Z" }, + { url = "https://files.pythonhosted.org/packages/ab/ce/05e9828a49271ba6b5b038b15b3934e996980dd78abdfeb52a04cfb9467e/yarl-1.22.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:cfebc0ac8333520d2d0423cbbe43ae43c8838862ddb898f5ca68565e395516e9", size = 97312, upload-time = "2025-10-06T14:10:48.007Z" }, + { url = "https://files.pythonhosted.org/packages/d1/c5/7dffad5e4f2265b29c9d7ec869c369e4223166e4f9206fc2243ee9eea727/yarl-1.22.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4398557cbf484207df000309235979c79c4356518fd5c99158c7d38203c4da4f", size = 361967, upload-time = "2025-10-06T14:10:49.997Z" }, + { url = "https://files.pythonhosted.org/packages/50/b2/375b933c93a54bff7fc041e1a6ad2c0f6f733ffb0c6e642ce56ee3b39970/yarl-1.22.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2ca6fd72a8cd803be290d42f2dec5cdcd5299eeb93c2d929bf060ad9efaf5de0", size = 323949, upload-time = "2025-10-06T14:10:52.004Z" }, + { url = "https://files.pythonhosted.org/packages/66/50/bfc2a29a1d78644c5a7220ce2f304f38248dc94124a326794e677634b6cf/yarl-1.22.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ca1f59c4e1ab6e72f0a23c13fca5430f889634166be85dbf1013683e49e3278e", size = 361818, upload-time = "2025-10-06T14:10:54.078Z" }, + { url = "https://files.pythonhosted.org/packages/46/96/f3941a46af7d5d0f0498f86d71275696800ddcdd20426298e572b19b91ff/yarl-1.22.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6c5010a52015e7c70f86eb967db0f37f3c8bd503a695a49f8d45700144667708", size = 372626, upload-time = "2025-10-06T14:10:55.767Z" }, + { url = "https://files.pythonhosted.org/packages/c1/42/8b27c83bb875cd89448e42cd627e0fb971fa1675c9ec546393d18826cb50/yarl-1.22.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d7672ecf7557476642c88497c2f8d8542f8e36596e928e9bcba0e42e1e7d71f", size = 341129, upload-time = "2025-10-06T14:10:57.985Z" }, + { url = "https://files.pythonhosted.org/packages/49/36/99ca3122201b382a3cf7cc937b95235b0ac944f7e9f2d5331d50821ed352/yarl-1.22.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:3b7c88eeef021579d600e50363e0b6ee4f7f6f728cd3486b9d0f3ee7b946398d", size = 346776, upload-time = "2025-10-06T14:10:59.633Z" }, + { url = "https://files.pythonhosted.org/packages/85/b4/47328bf996acd01a4c16ef9dcd2f59c969f495073616586f78cd5f2efb99/yarl-1.22.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:f4afb5c34f2c6fecdcc182dfcfc6af6cccf1aa923eed4d6a12e9d96904e1a0d8", size = 334879, upload-time = "2025-10-06T14:11:01.454Z" }, + { url = "https://files.pythonhosted.org/packages/c2/ad/b77d7b3f14a4283bffb8e92c6026496f6de49751c2f97d4352242bba3990/yarl-1.22.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:59c189e3e99a59cf8d83cbb31d4db02d66cda5a1a4374e8a012b51255341abf5", size = 350996, upload-time = "2025-10-06T14:11:03.452Z" }, + { url = "https://files.pythonhosted.org/packages/81/c8/06e1d69295792ba54d556f06686cbd6a7ce39c22307100e3fb4a2c0b0a1d/yarl-1.22.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:5a3bf7f62a289fa90f1990422dc8dff5a458469ea71d1624585ec3a4c8d6960f", size = 356047, upload-time = "2025-10-06T14:11:05.115Z" }, + { url = "https://files.pythonhosted.org/packages/4b/b8/4c0e9e9f597074b208d18cef227d83aac36184bfbc6eab204ea55783dbc5/yarl-1.22.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:de6b9a04c606978fdfe72666fa216ffcf2d1a9f6a381058d4378f8d7b1e5de62", size = 342947, upload-time = "2025-10-06T14:11:08.137Z" }, + { url = "https://files.pythonhosted.org/packages/e0/e5/11f140a58bf4c6ad7aca69a892bff0ee638c31bea4206748fc0df4ebcb3a/yarl-1.22.0-cp313-cp313t-win32.whl", hash = "sha256:1834bb90991cc2999f10f97f5f01317f99b143284766d197e43cd5b45eb18d03", size = 86943, upload-time = "2025-10-06T14:11:10.284Z" }, + { url = "https://files.pythonhosted.org/packages/31/74/8b74bae38ed7fe6793d0c15a0c8207bbb819cf287788459e5ed230996cdd/yarl-1.22.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ff86011bd159a9d2dfc89c34cfd8aff12875980e3bd6a39ff097887520e60249", size = 93715, upload-time = "2025-10-06T14:11:11.739Z" }, + { url = "https://files.pythonhosted.org/packages/69/66/991858aa4b5892d57aef7ee1ba6b4d01ec3b7eb3060795d34090a3ca3278/yarl-1.22.0-cp313-cp313t-win_arm64.whl", hash = "sha256:7861058d0582b847bc4e3a4a4c46828a410bca738673f35a29ba3ca5db0b473b", size = 83857, upload-time = "2025-10-06T14:11:13.586Z" }, + { url = "https://files.pythonhosted.org/packages/46/b3/e20ef504049f1a1c54a814b4b9bed96d1ac0e0610c3b4da178f87209db05/yarl-1.22.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:34b36c2c57124530884d89d50ed2c1478697ad7473efd59cfd479945c95650e4", size = 140520, upload-time = "2025-10-06T14:11:15.465Z" }, + { url = "https://files.pythonhosted.org/packages/e4/04/3532d990fdbab02e5ede063676b5c4260e7f3abea2151099c2aa745acc4c/yarl-1.22.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:0dd9a702591ca2e543631c2a017e4a547e38a5c0f29eece37d9097e04a7ac683", size = 93504, upload-time = "2025-10-06T14:11:17.106Z" }, + { url = "https://files.pythonhosted.org/packages/11/63/ff458113c5c2dac9a9719ac68ee7c947cb621432bcf28c9972b1c0e83938/yarl-1.22.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:594fcab1032e2d2cc3321bb2e51271e7cd2b516c7d9aee780ece81b07ff8244b", size = 94282, upload-time = "2025-10-06T14:11:19.064Z" }, + { url = "https://files.pythonhosted.org/packages/a7/bc/315a56aca762d44a6aaaf7ad253f04d996cb6b27bad34410f82d76ea8038/yarl-1.22.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f3d7a87a78d46a2e3d5b72587ac14b4c16952dd0887dbb051451eceac774411e", size = 372080, upload-time = "2025-10-06T14:11:20.996Z" }, 
+ { url = "https://files.pythonhosted.org/packages/3f/3f/08e9b826ec2e099ea6e7c69a61272f4f6da62cb5b1b63590bb80ca2e4a40/yarl-1.22.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:852863707010316c973162e703bddabec35e8757e67fcb8ad58829de1ebc8590", size = 338696, upload-time = "2025-10-06T14:11:22.847Z" }, + { url = "https://files.pythonhosted.org/packages/e3/9f/90360108e3b32bd76789088e99538febfea24a102380ae73827f62073543/yarl-1.22.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:131a085a53bfe839a477c0845acf21efc77457ba2bcf5899618136d64f3303a2", size = 387121, upload-time = "2025-10-06T14:11:24.889Z" }, + { url = "https://files.pythonhosted.org/packages/98/92/ab8d4657bd5b46a38094cfaea498f18bb70ce6b63508fd7e909bd1f93066/yarl-1.22.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:078a8aefd263f4d4f923a9677b942b445a2be970ca24548a8102689a3a8ab8da", size = 394080, upload-time = "2025-10-06T14:11:27.307Z" }, + { url = "https://files.pythonhosted.org/packages/f5/e7/d8c5a7752fef68205296201f8ec2bf718f5c805a7a7e9880576c67600658/yarl-1.22.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bca03b91c323036913993ff5c738d0842fc9c60c4648e5c8d98331526df89784", size = 372661, upload-time = "2025-10-06T14:11:29.387Z" }, + { url = "https://files.pythonhosted.org/packages/b6/2e/f4d26183c8db0bb82d491b072f3127fb8c381a6206a3a56332714b79b751/yarl-1.22.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:68986a61557d37bb90d3051a45b91fa3d5c516d177dfc6dd6f2f436a07ff2b6b", size = 364645, upload-time = "2025-10-06T14:11:31.423Z" }, + { url = "https://files.pythonhosted.org/packages/80/7c/428e5812e6b87cd00ee8e898328a62c95825bf37c7fa87f0b6bb2ad31304/yarl-1.22.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:4792b262d585ff0dff6bcb787f8492e40698443ec982a3568c2096433660c694", size = 355361, upload-time = "2025-10-06T14:11:33.055Z" }, + { url = "https://files.pythonhosted.org/packages/ec/2a/249405fd26776f8b13c067378ef4d7dd49c9098d1b6457cdd152a99e96a9/yarl-1.22.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:ebd4549b108d732dba1d4ace67614b9545b21ece30937a63a65dd34efa19732d", size = 381451, upload-time = "2025-10-06T14:11:35.136Z" }, + { url = "https://files.pythonhosted.org/packages/67/a8/fb6b1adbe98cf1e2dd9fad71003d3a63a1bc22459c6e15f5714eb9323b93/yarl-1.22.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:f87ac53513d22240c7d59203f25cc3beac1e574c6cd681bbfd321987b69f95fd", size = 383814, upload-time = "2025-10-06T14:11:37.094Z" }, + { url = "https://files.pythonhosted.org/packages/d9/f9/3aa2c0e480fb73e872ae2814c43bc1e734740bb0d54e8cb2a95925f98131/yarl-1.22.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:22b029f2881599e2f1b06f8f1db2ee63bd309e2293ba2d566e008ba12778b8da", size = 370799, upload-time = "2025-10-06T14:11:38.83Z" }, + { url = "https://files.pythonhosted.org/packages/50/3c/af9dba3b8b5eeb302f36f16f92791f3ea62e3f47763406abf6d5a4a3333b/yarl-1.22.0-cp314-cp314-win32.whl", hash = "sha256:6a635ea45ba4ea8238463b4f7d0e721bad669f80878b7bfd1f89266e2ae63da2", size = 82990, upload-time = "2025-10-06T14:11:40.624Z" }, + { url = "https://files.pythonhosted.org/packages/ac/30/ac3a0c5bdc1d6efd1b41fa24d4897a4329b3b1e98de9449679dd327af4f0/yarl-1.22.0-cp314-cp314-win_amd64.whl", hash = "sha256:0d6e6885777af0f110b0e5d7e5dda8b704efed3894da26220b7f3d887b839a79", size = 88292, upload-time = "2025-10-06T14:11:42.578Z" 
}, + { url = "https://files.pythonhosted.org/packages/df/0a/227ab4ff5b998a1b7410abc7b46c9b7a26b0ca9e86c34ba4b8d8bc7c63d5/yarl-1.22.0-cp314-cp314-win_arm64.whl", hash = "sha256:8218f4e98d3c10d683584cb40f0424f4b9fd6e95610232dd75e13743b070ee33", size = 82888, upload-time = "2025-10-06T14:11:44.863Z" }, + { url = "https://files.pythonhosted.org/packages/06/5e/a15eb13db90abd87dfbefb9760c0f3f257ac42a5cac7e75dbc23bed97a9f/yarl-1.22.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:45c2842ff0e0d1b35a6bf1cd6c690939dacb617a70827f715232b2e0494d55d1", size = 146223, upload-time = "2025-10-06T14:11:46.796Z" }, + { url = "https://files.pythonhosted.org/packages/18/82/9665c61910d4d84f41a5bf6837597c89e665fa88aa4941080704645932a9/yarl-1.22.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:d947071e6ebcf2e2bee8fce76e10faca8f7a14808ca36a910263acaacef08eca", size = 95981, upload-time = "2025-10-06T14:11:48.845Z" }, + { url = "https://files.pythonhosted.org/packages/5d/9a/2f65743589809af4d0a6d3aa749343c4b5f4c380cc24a8e94a3c6625a808/yarl-1.22.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:334b8721303e61b00019474cc103bdac3d7b1f65e91f0bfedeec2d56dfe74b53", size = 97303, upload-time = "2025-10-06T14:11:50.897Z" }, + { url = "https://files.pythonhosted.org/packages/b0/ab/5b13d3e157505c43c3b43b5a776cbf7b24a02bc4cccc40314771197e3508/yarl-1.22.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1e7ce67c34138a058fd092f67d07a72b8e31ff0c9236e751957465a24b28910c", size = 361820, upload-time = "2025-10-06T14:11:52.549Z" }, + { url = "https://files.pythonhosted.org/packages/fb/76/242a5ef4677615cf95330cfc1b4610e78184400699bdda0acb897ef5e49a/yarl-1.22.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d77e1b2c6d04711478cb1c4ab90db07f1609ccf06a287d5607fcd90dc9863acf", size = 323203, upload-time = "2025-10-06T14:11:54.225Z" }, + { url = "https://files.pythonhosted.org/packages/8c/96/475509110d3f0153b43d06164cf4195c64d16999e0c7e2d8a099adcd6907/yarl-1.22.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c4647674b6150d2cae088fc07de2738a84b8bcedebef29802cf0b0a82ab6face", size = 363173, upload-time = "2025-10-06T14:11:56.069Z" }, + { url = "https://files.pythonhosted.org/packages/c9/66/59db471aecfbd559a1fd48aedd954435558cd98c7d0da8b03cc6c140a32c/yarl-1.22.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efb07073be061c8f79d03d04139a80ba33cbd390ca8f0297aae9cce6411e4c6b", size = 373562, upload-time = "2025-10-06T14:11:58.783Z" }, + { url = "https://files.pythonhosted.org/packages/03/1f/c5d94abc91557384719da10ff166b916107c1b45e4d0423a88457071dd88/yarl-1.22.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e51ac5435758ba97ad69617e13233da53908beccc6cfcd6c34bbed8dcbede486", size = 339828, upload-time = "2025-10-06T14:12:00.686Z" }, + { url = "https://files.pythonhosted.org/packages/5f/97/aa6a143d3afba17b6465733681c70cf175af89f76ec8d9286e08437a7454/yarl-1.22.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:33e32a0dd0c8205efa8e83d04fc9f19313772b78522d1bdc7d9aed706bfd6138", size = 347551, upload-time = "2025-10-06T14:12:02.628Z" }, + { url = "https://files.pythonhosted.org/packages/43/3c/45a2b6d80195959239a7b2a8810506d4eea5487dce61c2a3393e7fc3c52e/yarl-1.22.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = 
"sha256:bf4a21e58b9cde0e401e683ebd00f6ed30a06d14e93f7c8fd059f8b6e8f87b6a", size = 334512, upload-time = "2025-10-06T14:12:04.871Z" }, + { url = "https://files.pythonhosted.org/packages/86/a0/c2ab48d74599c7c84cb104ebd799c5813de252bea0f360ffc29d270c2caa/yarl-1.22.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:e4b582bab49ac33c8deb97e058cd67c2c50dac0dd134874106d9c774fd272529", size = 352400, upload-time = "2025-10-06T14:12:06.624Z" }, + { url = "https://files.pythonhosted.org/packages/32/75/f8919b2eafc929567d3d8411f72bdb1a2109c01caaab4ebfa5f8ffadc15b/yarl-1.22.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:0b5bcc1a9c4839e7e30b7b30dd47fe5e7e44fb7054ec29b5bb8d526aa1041093", size = 357140, upload-time = "2025-10-06T14:12:08.362Z" }, + { url = "https://files.pythonhosted.org/packages/cf/72/6a85bba382f22cf78add705d8c3731748397d986e197e53ecc7835e76de7/yarl-1.22.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c0232bce2170103ec23c454e54a57008a9a72b5d1c3105dc2496750da8cfa47c", size = 341473, upload-time = "2025-10-06T14:12:10.994Z" }, + { url = "https://files.pythonhosted.org/packages/35/18/55e6011f7c044dc80b98893060773cefcfdbf60dfefb8cb2f58b9bacbd83/yarl-1.22.0-cp314-cp314t-win32.whl", hash = "sha256:8009b3173bcd637be650922ac455946197d858b3630b6d8787aa9e5c4564533e", size = 89056, upload-time = "2025-10-06T14:12:13.317Z" }, + { url = "https://files.pythonhosted.org/packages/f9/86/0f0dccb6e59a9e7f122c5afd43568b1d31b8ab7dda5f1b01fb5c7025c9a9/yarl-1.22.0-cp314-cp314t-win_amd64.whl", hash = "sha256:9fb17ea16e972c63d25d4a97f016d235c78dd2344820eb35bc034bc32012ee27", size = 96292, upload-time = "2025-10-06T14:12:15.398Z" }, + { url = "https://files.pythonhosted.org/packages/48/b7/503c98092fb3b344a179579f55814b613c1fbb1c23b3ec14a7b008a66a6e/yarl-1.22.0-cp314-cp314t-win_arm64.whl", hash = "sha256:9f6d73c1436b934e3f01df1e1b21ff765cd1d28c77dfb9ace207f746d4610ee1", size = 85171, upload-time = "2025-10-06T14:12:16.935Z" }, + { url = "https://files.pythonhosted.org/packages/73/ae/b48f95715333080afb75a4504487cbe142cae1268afc482d06692d605ae6/yarl-1.22.0-py3-none-any.whl", hash = "sha256:1380560bdba02b6b6c90de54133c81c9f2a453dee9912fe58c1dcced1edb7cff", size = 46814, upload-time = "2025-10-06T14:12:53.872Z" }, +] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +] + +[[package]] +name = "zope-event" +version = "6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/46/33/d3eeac228fc14de76615612ee208be2d8a5b5b0fada36bf9b62d6b40600c/zope_event-6.1.tar.gz", hash = "sha256:6052a3e0cb8565d3d4ef1a3a7809336ac519bc4fe38398cb8d466db09adef4f0", size = 18739, upload-time = "2025-11-07T08:05:49.934Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/b0/956902e5e1302f8c5d124e219c6bf214e2649f92ad5fce85b05c039a04c9/zope_event-6.1-py3-none-any.whl", hash = 
"sha256:0ca78b6391b694272b23ec1335c0294cc471065ed10f7f606858fc54566c25a0", size = 6414, upload-time = "2025-11-07T08:05:48.874Z" }, +] + +[[package]] +name = "zope-interface" +version = "8.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/71/c9/5ec8679a04d37c797d343f650c51ad67d178f0001c363e44b6ac5f97a9da/zope_interface-8.1.1.tar.gz", hash = "sha256:51b10e6e8e238d719636a401f44f1e366146912407b58453936b781a19be19ec", size = 254748, upload-time = "2025-11-15T08:32:52.404Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/3d/f5b8dd2512f33bfab4faba71f66f6873603d625212206dd36f12403ae4ca/zope_interface-8.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a16715808408db7252b8c1597ed9008bdad7bf378ed48eb9b0595fad4170e49d", size = 208660, upload-time = "2025-11-15T08:36:53.579Z" }, + { url = "https://files.pythonhosted.org/packages/e5/41/c331adea9b11e05ff9ac4eb7d3032b24c36a3654ae9f2bf4ef2997048211/zope_interface-8.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce6b58752acc3352c4aa0b55bbeae2a941d61537e6afdad2467a624219025aae", size = 208851, upload-time = "2025-11-15T08:36:54.854Z" }, + { url = "https://files.pythonhosted.org/packages/25/00/7a8019c3bb8b119c5f50f0a4869183a4b699ca004a7f87ce98382e6b364c/zope_interface-8.1.1-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:807778883d07177713136479de7fd566f9056a13aef63b686f0ab4807c6be259", size = 259292, upload-time = "2025-11-15T08:36:56.409Z" }, + { url = "https://files.pythonhosted.org/packages/1a/fc/b70e963bf89345edffdd5d16b61e789fdc09365972b603e13785360fea6f/zope_interface-8.1.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50e5eb3b504a7d63dc25211b9298071d5b10a3eb754d6bf2f8ef06cb49f807ab", size = 264741, upload-time = "2025-11-15T08:36:57.675Z" }, + { url = "https://files.pythonhosted.org/packages/96/fe/7d0b5c0692b283901b34847f2b2f50d805bfff4b31de4021ac9dfb516d2a/zope_interface-8.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eee6f93b2512ec9466cf30c37548fd3ed7bc4436ab29cd5943d7a0b561f14f0f", size = 264281, upload-time = "2025-11-15T08:36:58.968Z" }, + { url = "https://files.pythonhosted.org/packages/2b/2c/a7cebede1cf2757be158bcb151fe533fa951038cfc5007c7597f9f86804b/zope_interface-8.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:80edee6116d569883c58ff8efcecac3b737733d646802036dc337aa839a5f06b", size = 212327, upload-time = "2025-11-15T08:37:00.4Z" }, + { url = "https://files.pythonhosted.org/packages/85/81/3c3b5386ce4fba4612fd82ffb8a90d76bcfea33ca2b6399f21e94d38484f/zope_interface-8.1.1-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:84f9be6d959640de9da5d14ac1f6a89148b16da766e88db37ed17e936160b0b1", size = 209046, upload-time = "2025-11-15T08:37:01.473Z" }, + { url = "https://files.pythonhosted.org/packages/4a/e3/32b7cb950c4c4326b3760a8e28e5d6f70ad15f852bfd8f9364b58634f74b/zope_interface-8.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:531fba91dcb97538f70cf4642a19d6574269460274e3f6004bba6fe684449c51", size = 209104, upload-time = "2025-11-15T08:37:02.887Z" }, + { url = "https://files.pythonhosted.org/packages/a3/3d/c4c68e1752a5f5effa2c1f5eaa4fea4399433c9b058fb7000a34bfb1c447/zope_interface-8.1.1-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:fc65f5633d5a9583ee8d88d1f5de6b46cd42c62e47757cfe86be36fb7c8c4c9b", size = 259277, 
upload-time = "2025-11-15T08:37:04.389Z" }, + { url = "https://files.pythonhosted.org/packages/fd/5b/cf4437b174af7591ee29bbad728f620cab5f47bd6e9c02f87d59f31a0dda/zope_interface-8.1.1-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:efef80ddec4d7d99618ef71bc93b88859248075ca2e1ae1c78636654d3d55533", size = 264742, upload-time = "2025-11-15T08:37:05.613Z" }, + { url = "https://files.pythonhosted.org/packages/0b/0e/0cf77356862852d3d3e62db9aadae5419a1a7d89bf963b219745283ab5ca/zope_interface-8.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:49aad83525eca3b4747ef51117d302e891f0042b06f32aa1c7023c62642f962b", size = 264252, upload-time = "2025-11-15T08:37:07.035Z" }, + { url = "https://files.pythonhosted.org/packages/8a/10/2af54aa88b2fa172d12364116cc40d325fedbb1877c3bb031b0da6052855/zope_interface-8.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:71cf329a21f98cb2bd9077340a589e316ac8a415cac900575a32544b3dffcb98", size = 212330, upload-time = "2025-11-15T08:37:08.14Z" }, + { url = "https://files.pythonhosted.org/packages/b9/f5/44efbd98ba06cb937fce7a69fcd7a78c4ac7aa4e1ad2125536801376d2d0/zope_interface-8.1.1-cp314-cp314-macosx_10_9_x86_64.whl", hash = "sha256:da311e9d253991ca327601f47c4644d72359bac6950fbb22f971b24cd7850f8c", size = 209099, upload-time = "2025-11-15T08:37:09.395Z" }, + { url = "https://files.pythonhosted.org/packages/fd/36/a19866c09c8485c36a4c6908e1dd3f8820b41c1ee333c291157cf4cf09e7/zope_interface-8.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3fb25fca0442c7fb93c4ee40b42e3e033fef2f648730c4b7ae6d43222a3e8946", size = 209240, upload-time = "2025-11-15T08:37:10.687Z" }, + { url = "https://files.pythonhosted.org/packages/c1/28/0dbf40db772d779a4ac8d006a57ad60936d42ad4769a3d5410dcfb98f6f9/zope_interface-8.1.1-cp314-cp314-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:bac588d0742b4e35efb7c7df1dacc0397b51ed37a17d4169a38019a1cebacf0a", size = 260919, upload-time = "2025-11-15T08:37:11.838Z" }, + { url = "https://files.pythonhosted.org/packages/72/ae/650cd4c01dd1b32c26c800b2c4d852f044552c34a56fbb74d41f569cee31/zope_interface-8.1.1-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3d1f053d2d5e2b393e619bce1e55954885c2e63969159aa521839e719442db49", size = 264102, upload-time = "2025-11-15T08:37:13.241Z" }, + { url = "https://files.pythonhosted.org/packages/46/f0/f534a2c34c006aa090c593cd70eaf94e259fd0786f934698d81f0534d907/zope_interface-8.1.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:64a1ad7f4cb17d948c6bdc525a1d60c0e567b2526feb4fa38b38f249961306b8", size = 264276, upload-time = "2025-11-15T08:37:14.369Z" }, + { url = "https://files.pythonhosted.org/packages/5b/a8/d7e9cf03067b767e23908dbab5f6be7735d70cb4818311a248a8c4bb23cc/zope_interface-8.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:169214da1b82b7695d1a36f92d70b11166d66b6b09d03df35d150cc62ac52276", size = 212492, upload-time = "2025-11-15T08:37:15.538Z" }, +]