diff --git a/DO_OPENAPI_COMMIT_SHA.txt b/DO_OPENAPI_COMMIT_SHA.txt index 3e045b5..11ac4a0 100644 --- a/DO_OPENAPI_COMMIT_SHA.txt +++ b/DO_OPENAPI_COMMIT_SHA.txt @@ -1 +1 @@ -cf0a60a +ebfa95a diff --git a/src/pydo/_client.py b/src/pydo/_client.py index 0fe2140..0eee1c6 100644 --- a/src/pydo/_client.py +++ b/src/pydo/_client.py @@ -26,10 +26,12 @@ CdnOperations, CertificatesOperations, DatabasesOperations, + DedicatedInferencesOperations, DomainsOperations, DropletActionsOperations, DropletsOperations, FirewallsOperations, + FunctionsAccessKeyOperations, FunctionsOperations, GenaiOperations, ImageActionsOperations, @@ -49,6 +51,7 @@ ReservedIPsOperations, ReservedIPv6ActionsOperations, ReservedIPv6Operations, + SecurityOperations, SizesOperations, SnapshotsOperations, SpacesKeyOperations, @@ -597,6 +600,8 @@ class GeneratedClient: # pylint: disable=client-accepts-api-version-keyword,too :vartype billing_insights: pydo.operations.BillingInsightsOperations :ivar databases: DatabasesOperations operations :vartype databases: pydo.operations.DatabasesOperations + :ivar dedicated_inferences: DedicatedInferencesOperations operations + :vartype dedicated_inferences: pydo.operations.DedicatedInferencesOperations :ivar domains: DomainsOperations operations :vartype domains: pydo.operations.DomainsOperations :ivar droplets: DropletsOperations operations @@ -609,6 +614,8 @@ class GeneratedClient: # pylint: disable=client-accepts-api-version-keyword,too :vartype firewalls: pydo.operations.FirewallsOperations :ivar functions: FunctionsOperations operations :vartype functions: pydo.operations.FunctionsOperations + :ivar functions_access_key: FunctionsAccessKeyOperations operations + :vartype functions_access_key: pydo.operations.FunctionsAccessKeyOperations :ivar images: ImagesOperations operations :vartype images: pydo.operations.ImagesOperations :ivar image_actions: ImageActionsOperations operations @@ -641,6 +648,8 @@ class GeneratedClient: # pylint: 
disable=client-accepts-api-version-keyword,too :vartype reserved_ipv6_actions: pydo.operations.ReservedIPv6ActionsOperations :ivar byoip_prefixes: ByoipPrefixesOperations operations :vartype byoip_prefixes: pydo.operations.ByoipPrefixesOperations + :ivar security: SecurityOperations operations + :vartype security: pydo.operations.SecurityOperations :ivar sizes: SizesOperations operations :vartype sizes: pydo.operations.SizesOperations :ivar snapshots: SnapshotsOperations operations @@ -746,6 +755,9 @@ def __init__( self.databases = DatabasesOperations( self._client, self._config, self._serialize, self._deserialize ) + self.dedicated_inferences = DedicatedInferencesOperations( + self._client, self._config, self._serialize, self._deserialize + ) self.domains = DomainsOperations( self._client, self._config, self._serialize, self._deserialize ) @@ -764,6 +776,9 @@ def __init__( self.functions = FunctionsOperations( self._client, self._config, self._serialize, self._deserialize ) + self.functions_access_key = FunctionsAccessKeyOperations( + self._client, self._config, self._serialize, self._deserialize + ) self.images = ImagesOperations( self._client, self._config, self._serialize, self._deserialize ) @@ -812,6 +827,9 @@ def __init__( self.byoip_prefixes = ByoipPrefixesOperations( self._client, self._config, self._serialize, self._deserialize ) + self.security = SecurityOperations( + self._client, self._config, self._serialize, self._deserialize + ) self.sizes = SizesOperations( self._client, self._config, self._serialize, self._deserialize ) diff --git a/src/pydo/aio/_client.py b/src/pydo/aio/_client.py index 6935483..64d498b 100644 --- a/src/pydo/aio/_client.py +++ b/src/pydo/aio/_client.py @@ -26,10 +26,12 @@ CdnOperations, CertificatesOperations, DatabasesOperations, + DedicatedInferencesOperations, DomainsOperations, DropletActionsOperations, DropletsOperations, FirewallsOperations, + FunctionsAccessKeyOperations, FunctionsOperations, GenaiOperations, 
ImageActionsOperations, @@ -49,6 +51,7 @@ ReservedIPsOperations, ReservedIPv6ActionsOperations, ReservedIPv6Operations, + SecurityOperations, SizesOperations, SnapshotsOperations, SpacesKeyOperations, @@ -597,6 +600,8 @@ class GeneratedClient: # pylint: disable=client-accepts-api-version-keyword,too :vartype billing_insights: pydo.aio.operations.BillingInsightsOperations :ivar databases: DatabasesOperations operations :vartype databases: pydo.aio.operations.DatabasesOperations + :ivar dedicated_inferences: DedicatedInferencesOperations operations + :vartype dedicated_inferences: pydo.aio.operations.DedicatedInferencesOperations :ivar domains: DomainsOperations operations :vartype domains: pydo.aio.operations.DomainsOperations :ivar droplets: DropletsOperations operations @@ -609,6 +614,8 @@ class GeneratedClient: # pylint: disable=client-accepts-api-version-keyword,too :vartype firewalls: pydo.aio.operations.FirewallsOperations :ivar functions: FunctionsOperations operations :vartype functions: pydo.aio.operations.FunctionsOperations + :ivar functions_access_key: FunctionsAccessKeyOperations operations + :vartype functions_access_key: pydo.aio.operations.FunctionsAccessKeyOperations :ivar images: ImagesOperations operations :vartype images: pydo.aio.operations.ImagesOperations :ivar image_actions: ImageActionsOperations operations @@ -641,6 +648,8 @@ class GeneratedClient: # pylint: disable=client-accepts-api-version-keyword,too :vartype reserved_ipv6_actions: pydo.aio.operations.ReservedIPv6ActionsOperations :ivar byoip_prefixes: ByoipPrefixesOperations operations :vartype byoip_prefixes: pydo.aio.operations.ByoipPrefixesOperations + :ivar security: SecurityOperations operations + :vartype security: pydo.aio.operations.SecurityOperations :ivar sizes: SizesOperations operations :vartype sizes: pydo.aio.operations.SizesOperations :ivar snapshots: SnapshotsOperations operations @@ -746,6 +755,9 @@ def __init__( self.databases = DatabasesOperations( self._client, 
self._config, self._serialize, self._deserialize ) + self.dedicated_inferences = DedicatedInferencesOperations( + self._client, self._config, self._serialize, self._deserialize + ) self.domains = DomainsOperations( self._client, self._config, self._serialize, self._deserialize ) @@ -764,6 +776,9 @@ def __init__( self.functions = FunctionsOperations( self._client, self._config, self._serialize, self._deserialize ) + self.functions_access_key = FunctionsAccessKeyOperations( + self._client, self._config, self._serialize, self._deserialize + ) self.images = ImagesOperations( self._client, self._config, self._serialize, self._deserialize ) @@ -812,6 +827,9 @@ def __init__( self.byoip_prefixes = ByoipPrefixesOperations( self._client, self._config, self._serialize, self._deserialize ) + self.security = SecurityOperations( + self._client, self._config, self._serialize, self._deserialize + ) self.sizes = SizesOperations( self._client, self._config, self._serialize, self._deserialize ) diff --git a/src/pydo/aio/operations/__init__.py b/src/pydo/aio/operations/__init__.py index 4a74b7c..0c8c048 100644 --- a/src/pydo/aio/operations/__init__.py +++ b/src/pydo/aio/operations/__init__.py @@ -17,12 +17,14 @@ from ._operations import InvoicesOperations from ._operations import BillingInsightsOperations from ._operations import DatabasesOperations +from ._operations import DedicatedInferencesOperations from ._operations import DomainsOperations from ._operations import DropletsOperations from ._operations import DropletActionsOperations from ._operations import AutoscalepoolsOperations from ._operations import FirewallsOperations from ._operations import FunctionsOperations +from ._operations import FunctionsAccessKeyOperations from ._operations import ImagesOperations from ._operations import ImageActionsOperations from ._operations import KubernetesOperations @@ -39,6 +41,7 @@ from ._operations import ReservedIPv6Operations from ._operations import ReservedIPv6ActionsOperations 
from ._operations import ByoipPrefixesOperations +from ._operations import SecurityOperations from ._operations import SizesOperations from ._operations import SnapshotsOperations from ._operations import SpacesKeyOperations @@ -70,12 +73,14 @@ "InvoicesOperations", "BillingInsightsOperations", "DatabasesOperations", + "DedicatedInferencesOperations", "DomainsOperations", "DropletsOperations", "DropletActionsOperations", "AutoscalepoolsOperations", "FirewallsOperations", "FunctionsOperations", + "FunctionsAccessKeyOperations", "ImagesOperations", "ImageActionsOperations", "KubernetesOperations", @@ -92,6 +97,7 @@ "ReservedIPv6Operations", "ReservedIPv6ActionsOperations", "ByoipPrefixesOperations", + "SecurityOperations", "SizesOperations", "SnapshotsOperations", "SpacesKeyOperations", diff --git a/src/pydo/aio/operations/_operations.py b/src/pydo/aio/operations/_operations.py index e68d312..00b59bc 100644 --- a/src/pydo/aio/operations/_operations.py +++ b/src/pydo/aio/operations/_operations.py @@ -49,6 +49,7 @@ build_addons_patch_request, build_apps_assign_alert_destinations_request, build_apps_cancel_deployment_request, + build_apps_cancel_event_request, build_apps_cancel_job_invocation_request, build_apps_commit_rollback_request, build_apps_create_deployment_request, @@ -56,6 +57,8 @@ build_apps_create_rollback_request, build_apps_delete_request, build_apps_get_deployment_request, + build_apps_get_event_logs_request, + build_apps_get_event_request, build_apps_get_exec_active_deployment_request, build_apps_get_exec_request, build_apps_get_health_request, @@ -71,6 +74,7 @@ build_apps_get_request, build_apps_list_alerts_request, build_apps_list_deployments_request, + build_apps_list_events_request, build_apps_list_instance_sizes_request, build_apps_list_job_invocations_request, build_apps_list_metrics_bandwidth_daily_request, @@ -177,6 +181,19 @@ build_databases_update_region_request, build_databases_update_sql_mode_request, build_databases_update_user_request, + 
build_dedicated_inferences_create_request, + build_dedicated_inferences_create_tokens_request, + build_dedicated_inferences_delete_request, + build_dedicated_inferences_delete_tokens_request, + build_dedicated_inferences_get_accelerator_request, + build_dedicated_inferences_get_ca_request, + build_dedicated_inferences_get_gpu_model_config_request, + build_dedicated_inferences_get_request, + build_dedicated_inferences_list_accelerators_request, + build_dedicated_inferences_list_request, + build_dedicated_inferences_list_sizes_request, + build_dedicated_inferences_list_tokens_request, + build_dedicated_inferences_patch_request, build_domains_create_record_request, build_domains_create_request, build_domains_delete_record_request, @@ -221,6 +238,10 @@ build_firewalls_get_request, build_firewalls_list_request, build_firewalls_update_request, + build_functions_access_key_create_request, + build_functions_access_key_delete_request, + build_functions_access_key_list_request, + build_functions_access_key_update_request, build_functions_create_namespace_request, build_functions_create_trigger_request, build_functions_delete_namespace_request, @@ -522,6 +543,16 @@ build_reserved_ipv6_delete_request, build_reserved_ipv6_get_request, build_reserved_ipv6_list_request, + build_security_create_scan_request, + build_security_create_scan_rule_request, + build_security_create_suppression_request, + build_security_delete_suppression_request, + build_security_get_latest_scan_request, + build_security_get_scan_request, + build_security_list_scan_finding_affected_resources_request, + build_security_list_scans_request, + build_security_list_settings_request, + build_security_update_settings_plan_request, build_sizes_list_request, build_snapshots_delete_request, build_snapshots_get_request, @@ -75697,8 +75728,10 @@ async def get_logs_active_deployment( * BUILD: Build-time logs * DEPLOY: Deploy-time logs * RUN: Live run-time logs - * RUN_RESTARTED: Logs of crashed/restarted instances 
during runtime. Known values are: - "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED". + * RUN_RESTARTED: Logs of crashed/restarted instances during runtime + * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are: + "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value + is "UNSPECIFIED". :paramtype type: str :keyword pod_connection_timeout: An optional time duration to wait if the underlying component instance is not immediately available. Default: ``3m``. Default value is None. @@ -85545,8 +85578,10 @@ async def get_logs( * BUILD: Build-time logs * DEPLOY: Deploy-time logs * RUN: Live run-time logs - * RUN_RESTARTED: Logs of crashed/restarted instances during runtime. Known values are: - "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED". + * RUN_RESTARTED: Logs of crashed/restarted instances during runtime + * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are: + "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value + is "UNSPECIFIED". :paramtype type: str :keyword pod_connection_timeout: An optional time duration to wait if the underlying component instance is not immediately available. Default: ``3m``. Default value is None. @@ -85693,8 +85728,10 @@ async def get_logs_aggregate( * BUILD: Build-time logs * DEPLOY: Deploy-time logs * RUN: Live run-time logs - * RUN_RESTARTED: Logs of crashed/restarted instances during runtime. Known values are: - "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED". + * RUN_RESTARTED: Logs of crashed/restarted instances during runtime + * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are: + "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value + is "UNSPECIFIED". 
:paramtype type: str :keyword pod_connection_timeout: An optional time duration to wait if the underlying component instance is not immediately available. Default: ``3m``. Default value is None. @@ -85971,8 +86008,10 @@ async def get_logs_active_deployment_aggregate( * BUILD: Build-time logs * DEPLOY: Deploy-time logs * RUN: Live run-time logs - * RUN_RESTARTED: Logs of crashed/restarted instances during runtime. Known values are: - "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED". + * RUN_RESTARTED: Logs of crashed/restarted instances during runtime + * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are: + "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value + is "UNSPECIFIED". :paramtype type: str :keyword pod_connection_timeout: An optional time duration to wait if the underlying component instance is not immediately available. Default: ``3m``. Default value is None. @@ -86731,6 +86770,5301 @@ async def get_job_invocation_logs( return cast(JSON, deserialized) # type: ignore + @distributed_trace_async + async def list_events( + self, + app_id: str, + *, + page: int = 1, + per_page: int = 20, + event_types: Optional[List[str]] = None, + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """List App Events. + + List all events for an app, including deployments and autoscaling events. + + :param app_id: The app ID. Required. + :type app_id: str + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword event_types: Filter events by event type. Default value is None. + :paramtype event_types: list[str] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == { + "events": [ + { + "autoscaling": { + "components": { + "str": { + "from": 0, # Optional. The number of + replicas before scaling. + "to": 0, # Optional. The number of + replicas after scaling. + "triggering_metric": "str" # + Optional. The metric that triggered the scale change. Known + values are "cpu", "requests_per_second", "request_duration". + For inactivity sleep, "scale_from_zero" and "scale_to_zero" + are used. + } + }, + "phase": "str" # Optional. The current phase of the + autoscaling event. Known values are: "UNKNOWN", "PENDING", + "IN_PROGRESS", "SUCCEEDED", "FAILED", and "CANCELED". + }, + "created_at": "2020-02-20 00:00:00", # Optional. When the + event was created. + "deployment": { + "cause": "str", # Optional. What caused this + deployment to be created. + "cloned_from": "str", # Optional. The ID of a + previous deployment that this deployment was cloned from. + "created_at": "2020-02-20 00:00:00", # Optional. The + creation time of the deployment. + "functions": [ + { + "name": "str", # Optional. The name + of this functions component. + "namespace": "str", # Optional. The + namespace where the functions are deployed. + "source_commit_hash": "str" # + Optional. The commit hash of the repository that was used to + build this functions component. + } + ], + "id": "str", # Optional. The ID of the deployment. + "jobs": [ + { + "name": "str", # Optional. The name + of this job. + "source_commit_hash": "str" # + Optional. The commit hash of the repository that was used to + build this job. + } + ], + "phase": "UNKNOWN", # Optional. Default value is + "UNKNOWN". Known values are: "UNKNOWN", "PENDING_BUILD", "BUILDING", + "PENDING_DEPLOY", "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and + "CANCELED". + "phase_last_updated_at": "2020-02-20 00:00:00", # + Optional. When the deployment phase was last updated. + "progress": { + "error_steps": 0, # Optional. 
Number of + unsuccessful steps. + "pending_steps": 0, # Optional. Number of + pending steps. + "running_steps": 0, # Optional. Number of + currently running steps. + "steps": [ + { + "component_name": "str", # + Optional. The component name that this step is associated + with. + "ended_at": "2020-02-20 + 00:00:00", # Optional. The end time of this step. + "message_base": "str", # + Optional. The base of a human-readable description of the + step intended to be combined with the component name for + presentation. For example: ``message_base`` = "Building + service" ``component_name`` = "api". + "name": "str", # Optional. + The name of this step. + "reason": { + "code": "str", # + Optional. The error code. + "message": "str" # + Optional. The error message. + }, + "started_at": "2020-02-20 + 00:00:00", # Optional. The start time of this step. + "status": "UNKNOWN", # + Optional. Default value is "UNKNOWN". Known values are: + "UNKNOWN", "PENDING", "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. Child + steps of this step. + ] + } + ], + "success_steps": 0, # Optional. Number of + successful steps. + "summary_steps": [ + { + "component_name": "str", # + Optional. The component name that this step is associated + with. + "ended_at": "2020-02-20 + 00:00:00", # Optional. The end time of this step. + "message_base": "str", # + Optional. The base of a human-readable description of the + step intended to be combined with the component name for + presentation. For example: ``message_base`` = "Building + service" ``component_name`` = "api". + "name": "str", # Optional. + The name of this step. + "reason": { + "code": "str", # + Optional. The error code. + "message": "str" # + Optional. The error message. + }, + "started_at": "2020-02-20 + 00:00:00", # Optional. The start time of this step. + "status": "UNKNOWN", # + Optional. Default value is "UNKNOWN". Known values are: + "UNKNOWN", "PENDING", "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. 
Child + steps of this step. + ] + } + ], + "total_steps": 0 # Optional. Total number of + steps. + }, + "services": [ + { + "name": "str", # Optional. The name + of this service. + "source_commit_hash": "str" # + Optional. The commit hash of the repository that was used to + build this service. + } + ], + "spec": { + "name": "str", # The name of the app. Must + be unique across all apps in the same account. Required. + "databases": [ + { + "name": "str", # The + database's name. The name must be unique across all + components within the same app and cannot use capital + letters. Required. + "cluster_name": "str", # + Optional. The name of the underlying DigitalOcean DBaaS + cluster. This is required for production databases. For + dev databases, if cluster_name is not set, a new cluster + will be provisioned. + "db_name": "str", # + Optional. The name of the MySQL or PostgreSQL database to + configure. + "db_user": "str", # + Optional. The name of the MySQL or PostgreSQL user to + configure. + "engine": "UNSET", # + Optional. Default value is "UNSET". * MYSQL: MySQL * PG: + PostgreSQL * REDIS: Caching * MONGODB: MongoDB * KAFKA: + Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey. Known + values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", + "KAFKA", "OPENSEARCH", and "VALKEY". + "production": bool, # + Optional. Whether this is a production or dev database. + "version": "str" # Optional. + The version of the database engine. + } + ], + "disable_edge_cache": False, # Optional. + Default value is False. .. role:: raw-html-m2r(raw) :format: + html If set to ``true``"" , the app will **not** be cached at + the edge (CDN). Enable this option if you want to manage CDN + configuration yourself"u2014whether by using an external CDN + provider or by handling static content and caching within your + app. 
This setting is also recommended for apps that require + real-time data or serve dynamic content, such as those using + Server-Sent Events (SSE) over GET, or hosting an MCP (Model + Context Protocol) Server that utilizes SSE."" + :raw-html-m2r:`
` **Note:** This feature is not available for + static site components."" :raw-html-m2r:`
` For more + information, see `Disable CDN Cache + `_. + "disable_email_obfuscation": False, # + Optional. Default value is False. If set to ``true``"" , email + addresses in the app will not be obfuscated. This is useful for + apps that require email addresses to be visible (in the HTML + markup). + "domains": [ + { + "domain": "str", # The + hostname for the domain. Required. + "minimum_tls_version": "str", + # Optional. The minimum version of TLS a client + application can use to access resources for the domain. + Must be one of the following values wrapped within + quotations: ``"1.2"`` or ``"1.3"``. Known values are: + "1.2" and "1.3". + "type": "UNSPECIFIED", # + Optional. Default value is "UNSPECIFIED". * DEFAULT: The + default ``.ondigitalocean.app`` domain assigned to this + app * PRIMARY: The primary domain for this app that is + displayed as the default in the control panel, used in + bindable environment variables, and any other places that + reference an app's live URL. Only one domain may be set + as primary. * ALIAS: A non-primary domain. Known values + are: "UNSPECIFIED", "DEFAULT", "PRIMARY", and "ALIAS". + "wildcard": bool, # + Optional. Indicates whether the domain includes all + sub-domains, in addition to the given domain. + "zone": "str" # Optional. + Optional. If the domain uses DigitalOcean DNS and you + would like App Platform to automatically manage it for + you, set this to the name of the domain on your account. + For example, If the domain you are adding is + ``app.domain.com``"" , the zone could be ``domain.com``. + } + ], + "egress": { + "type": "AUTOASSIGN" # Optional. + Default value is "AUTOASSIGN". The app egress type. Known + values are: "AUTOASSIGN" and "DEDICATED_IP". + }, + "enhanced_threat_control_enabled": False, # + Optional. Default value is False. If set to ``true``"" , + suspicious requests will go through additional security checks to + help mitigate layer 7 DDoS attacks. 
+ "functions": [ + { + "name": "str", # The name. + Must be unique across all components within the same app. + Required. + "alerts": [ + { + "disabled": + bool, # Optional. Is the alert disabled?. + "operator": + "UNSPECIFIED_OPERATOR", # Optional. Default + value is "UNSPECIFIED_OPERATOR". Known values + are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and + "LESS_THAN". + "rule": + "UNSPECIFIED_RULE", # Optional. Default value is + "UNSPECIFIED_RULE". Known values are: + "UNSPECIFIED_RULE", "CPU_UTILIZATION", + "MEM_UTILIZATION", "RESTART_COUNT", + "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", + "DOMAIN_FAILED", "DOMAIN_LIVE", + "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", + "FUNCTIONS_ACTIVATION_COUNT", + "FUNCTIONS_AVERAGE_DURATION_MS", + "FUNCTIONS_ERROR_RATE_PER_MINUTE", + "FUNCTIONS_AVERAGE_WAIT_TIME_MS", + "FUNCTIONS_ERROR_COUNT", and + "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, + # Optional. Threshold value for alert. + "window": + "UNSPECIFIED_WINDOW" # Optional. Default value + is "UNSPECIFIED_WINDOW". Known values are: + "UNSPECIFIED_WINDOW", "FIVE_MINUTES", + "TEN_MINUTES", "THIRTY_MINUTES", and "ONE_HOUR". + } + ], + "bitbucket": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "cors": { + "allow_credentials": + bool, # Optional. Whether browsers should expose the + response to the client-side JavaScript code when the + request"u2019s credentials mode is include. This + configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # + Optional. The set of allowed HTTP request + headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # + Optional. The set of allowed HTTP methods. 
This + configures the ``Access-Control-Allow-Methods`` + header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. Exact string + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "prefix": "str", # Optional. Prefix-based + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "regex": "str" # Optional. RE2 style + regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For + more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # + Optional. The set of HTTP response headers that + browsers are allowed to access. This configures + the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # + Optional. An optional duration specifying how long + browsers can cache the results of a preflight + request. This configures the + ``Access-Control-Max-Age`` header. + }, + "envs": [ + { + "key": "str", + # The variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value + is "RUN_AND_BUILD_TIME". * RUN_TIME: Made + available only at run-time * BUILD_TIME: Made + available only at build-time * + RUN_AND_BUILD_TIME: Made available at both build + and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and + "RUN_AND_BUILD_TIME". + "type": + "GENERAL", # Optional. Default value is + "GENERAL". * GENERAL: A plain-text environment + variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and + "SECRET". + "value": + "str" # Optional. The value. If the type is + ``SECRET``"" , the value will be encrypted on + first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # + Optional. The name of the branch to use. + "repo_clone_url": + "str" # Optional. The clone URL of the repo. + Example: + ``https://github.com/digitalocean/sample-golang.git``. 
+ }, + "github": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "log_destinations": [ + { + "name": + "str", # Required. + "datadog": { + "api_key": "str", # Datadog API key. + Required. + "endpoint": "str" # Optional. Datadog HTTP + log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # Optional. Password + for user defined in User. Is required + when ``endpoint`` is set. Cannot be set + if using a DigitalOcean DBaaS OpenSearch + cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name + of a DigitalOcean DBaaS OpenSearch cluster to + use as a log forwarding destination. Cannot + be specified if ``endpoint`` is also + specified. + "endpoint": "str", # Optional. OpenSearch + API Endpoint. Only HTTPS is supported. + Format: + https://:code:``::code:``. Cannot + be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # Optional. Default + value is "logs". The index name to use for + the logs. If not set, the default index name + is "logs". + }, + "papertrail": + { + "endpoint": "str" # Papertrail syslog + endpoint. Required. + } + } + ], + "routes": [ + { + "path": + "str", # Optional. (Deprecated - Use Ingress + Rules instead). 
An HTTP path prefix. Paths must + start with / and must be unique across all + components within an app. + "preserve_path_prefix": bool # Optional. An + optional flag to preserve the path that is + forwarded to the backend service. By default, the + HTTP request path will be trimmed from the left + when forwarded to the component. For example, a + component with ``path=/api`` will have requests + to ``/api/list`` trimmed to ``/list``. If this + value is ``true``"" , the path will remain + ``/api/list``. + } + ], + "source_dir": "str" # + Optional. An optional path to the working directory to + use for the build. For Dockerfile builds, this will be + used as the build context. Must be relative to the root + of the repo. + } + ], + "ingress": { + "rules": [ + { + "component": { + "name": + "str", # The name of the component to route to. + Required. + "preserve_path_prefix": "str", # Optional. An + optional flag to preserve the path that is + forwarded to the backend service. By default, the + HTTP request path will be trimmed from the left + when forwarded to the component. For example, a + component with ``path=/api`` will have requests + to ``/api/list`` trimmed to ``/list``. If this + value is ``true``"" , the path will remain + ``/api/list``. Note: this is not applicable for + Functions Components and is mutually exclusive + with ``rewrite``. + "rewrite": + "str" # Optional. An optional field that will + rewrite the path of the component to be what is + specified here. By default, the HTTP request path + will be trimmed from the left when forwarded to + the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If you specified the + rewrite to be ``/v1/``"" , requests to + ``/api/list`` would be rewritten to ``/v1/list``. + Note: this is mutually exclusive with + ``preserve_path_prefix``. + }, + "cors": { + "allow_credentials": bool, # Optional. 
Whether + browsers should expose the response to the + client-side JavaScript code when the + request"u2019s credentials mode is include. This + configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" + # Optional. The set of allowed HTTP request + headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" + # Optional. The set of allowed HTTP methods. + This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. Exact string + match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "prefix": "str", # Optional. + Prefix-based match. Only 1 of ``exact``"" + , ``prefix``"" , or ``regex`` must be + set. + "regex": "str" # Optional. RE2 style + regex-based match. Only 1 of ``exact``"" + , ``prefix``"" , or ``regex`` must be + set. For more information about RE2 + syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" + # Optional. The set of HTTP response headers + that browsers are allowed to access. This + configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": + "str" # Optional. An optional duration + specifying how long browsers can cache the + results of a preflight request. This configures + the ``Access-Control-Max-Age`` header. + }, + "match": { + "authority": + { + "exact": "str" # Required. + }, + "path": { + "prefix": "str" # Prefix-based match. For + example, ``/api`` will match ``/api``"" , + ``/api/``"" , and any nested paths such as + ``/api/v1/endpoint``. Required. + } + }, + "redirect": { + "authority": + "str", # Optional. The authority/host to + redirect to. This can be a hostname or IP + address. Note: use ``port`` to set the port. + "port": 0, # + Optional. The port to redirect to. + "redirect_code": 0, # Optional. The redirect + code to use. Defaults to ``302``. Supported + values are 300, 301, 302, 303, 304, 307, 308. 
+ "scheme": + "str", # Optional. The scheme to redirect to. + Supported values are ``http`` or ``https``. + Default: ``https``. + "uri": "str" + # Optional. An optional URI path to redirect to. + Note: if this is specified the whole URI of the + original request will be overwritten to this + value, irrespective of the original request URI + being matched. + } + } + ] + }, + "jobs": [ + { + "autoscaling": { + "max_instance_count": + 0, # Optional. The maximum amount of instances for + this component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # Optional. Default value is + 80. The average target CPU utilization for + the component. + } + }, + "min_instance_count": + 0 # Optional. The minimum amount of instances for + this component. Must be less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "build_command": "str", # + Optional. An optional build command to run while building + this component from source. + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root + of the repo. If set, it will be used to build this + component. Otherwise, App Platform will attempt to build + it using buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", + # The variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value + is "RUN_AND_BUILD_TIME". * RUN_TIME: Made + available only at run-time * BUILD_TIME: Made + available only at build-time * + RUN_AND_BUILD_TIME: Made available at both build + and run-time. 
Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and + "RUN_AND_BUILD_TIME". + "type": + "GENERAL", # Optional. Default value is + "GENERAL". * GENERAL: A plain-text environment + variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and + "SECRET". + "value": + "str" # Optional. The value. If the type is + ``SECRET``"" , the value will be encrypted on + first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # + Optional. The name of the branch to use. + "repo_clone_url": + "str" # Optional. The clone URL of the repo. + Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": + bool # Optional. Whether to automatically deploy + new images. Can only be used for images hosted in + DOCR and can only be used with an image tag, not + a specific digest. + }, + "digest": "str", # + Optional. The image digest. Cannot be specified if + tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for + the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. The + credentials to be able to pull the image. The value + will be encrypted on first submission. On following + submissions, the encrypted value should be used. 
* + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for + registries of type ``GHCR``. + "registry_type": + "str", # Optional. * DOCKER_HUB: The DockerHub + container registry type. * DOCR: The DigitalOcean + container registry type. * GHCR: The Github container + registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", + # Optional. The repository name. + "tag": "latest" # + Optional. Default value is "latest". The repository + tag. Defaults to ``latest`` if not provided and no + digest is provided. Cannot be specified if digest is + provided. + }, + "instance_count": 1, # + Optional. Default value is 1. The amount of instances + that this component should be scaled to. Default: 1. Must + not be set if autoscaling is used. + "instance_size_slug": {}, + "kind": "UNSPECIFIED", # + Optional. Default value is "UNSPECIFIED". * UNSPECIFIED: + Default job type, will auto-complete to POST_DEPLOY kind. + * PRE_DEPLOY: Indicates a job that runs before an app + deployment. * POST_DEPLOY: Indicates a job that runs + after an app deployment. * FAILED_DEPLOY: Indicates a job + that runs after a component fails to deploy. Known values + are: "UNSPECIFIED", "PRE_DEPLOY", "POST_DEPLOY", and + "FAILED_DEPLOY". + "log_destinations": [ + { + "name": + "str", # Required. + "datadog": { + "api_key": "str", # Datadog API key. + Required. + "endpoint": "str" # Optional. Datadog HTTP + log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # Optional. Password + for user defined in User. Is required + when ``endpoint`` is set. Cannot be set + if using a DigitalOcean DBaaS OpenSearch + cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. 
The name + of a DigitalOcean DBaaS OpenSearch cluster to + use as a log forwarding destination. Cannot + be specified if ``endpoint`` is also + specified. + "endpoint": "str", # Optional. OpenSearch + API Endpoint. Only HTTPS is supported. + Format: + https://:code:``::code:``. Cannot + be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # Optional. Default + value is "logs". The index name to use for + the logs. If not set, the default index name + is "logs". + }, + "papertrail": + { + "endpoint": "str" # Papertrail syslog + endpoint. Required. + } + } + ], + "name": "str", # Optional. + The name. Must be unique across all components within the + same app. + "run_command": "str", # + Optional. An optional run command to override the + component's default. + "source_dir": "str", # + Optional. An optional path to the working directory to + use for the build. For Dockerfile builds, this will be + used as the build context. Must be relative to the root + of the repo. + "termination": { + "grace_period_seconds": 0 # Optional. The number of + seconds to wait between sending a TERM signal to a + container and issuing a KILL which causes immediate + shutdown. (Default 120). + } + } + ], + "maintenance": { + "archive": bool, # Optional. + Indicates whether the app should be archived. Setting this to + true implies that enabled is set to true. + "enabled": bool, # Optional. + Indicates whether maintenance mode should be enabled for the + app. + "offline_page_url": "str" # + Optional. A custom offline page to display when maintenance + mode is enabled or the app is archived. + }, + "region": "str", # Optional. The slug form + of the geographical origin of the app. Default: ``nearest + available``. Known values are: "atl", "nyc", "sfo", "tor", "ams", + "fra", "lon", "blr", "sgp", and "syd". + "services": [ + { + "autoscaling": { + "max_instance_count": + 0, # Optional. The maximum amount of instances for + this component. 
Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # Optional. Default value is + 80. The average target CPU utilization for + the component. + } + }, + "min_instance_count": + 0 # Optional. The minimum amount of instances for + this component. Must be less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "build_command": "str", # + Optional. An optional build command to run while building + this component from source. + "cors": { + "allow_credentials": + bool, # Optional. Whether browsers should expose the + response to the client-side JavaScript code when the + request"u2019s credentials mode is include. This + configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # + Optional. The set of allowed HTTP request + headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # + Optional. The set of allowed HTTP methods. This + configures the ``Access-Control-Allow-Methods`` + header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. Exact string + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "prefix": "str", # Optional. Prefix-based + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "regex": "str" # Optional. RE2 style + regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For + more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # + Optional. The set of HTTP response headers that + browsers are allowed to access. This configures + the ``Access-Control-Expose-Headers`` header. 
+ ], + "max_age": "str" # + Optional. An optional duration specifying how long + browsers can cache the results of a preflight + request. This configures the + ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root + of the repo. If set, it will be used to build this + component. Otherwise, App Platform will attempt to build + it using buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", + # The variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value + is "RUN_AND_BUILD_TIME". * RUN_TIME: Made + available only at run-time * BUILD_TIME: Made + available only at build-time * + RUN_AND_BUILD_TIME: Made available at both build + and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and + "RUN_AND_BUILD_TIME". + "type": + "GENERAL", # Optional. Default value is + "GENERAL". * GENERAL: A plain-text environment + variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and + "SECRET". + "value": + "str" # Optional. The value. If the type is + ``SECRET``"" , the value will be encrypted on + first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # + Optional. The name of the branch to use. + "repo_clone_url": + "str" # Optional. The clone URL of the repo. + Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # + Optional. The name of the branch to use. 
+ "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "health_check": { + "failure_threshold": + 0, # Optional. The number of failed health checks + before considered unhealthy. + "http_path": "str", + # Optional. The route path used for the HTTP health + check ping. If not set, the HTTP health check will be + disabled and a TCP health check used instead. + "initial_delay_seconds": 0, # Optional. The number + of seconds to wait before beginning health checks. + "period_seconds": 0, + # Optional. The number of seconds to wait between + health checks. + "port": 0, # + Optional. The port on which the health check will be + performed. If not set, the health check will be + performed on the component's http_port. + "success_threshold": + 0, # Optional. The number of successful health + checks before considered healthy. + "timeout_seconds": 0 + # Optional. The number of seconds after which the + check times out. + }, + "http_port": 0, # Optional. + The internal port on which this service's run command + will listen. Default: 8080 If there is not an environment + variable with the name ``PORT``"" , one will be + automatically added with its value set to the value of + this field. + "image": { + "deploy_on_push": { + "enabled": + bool # Optional. Whether to automatically deploy + new images. Can only be used for images hosted in + DOCR and can only be used with an image tag, not + a specific digest. + }, + "digest": "str", # + Optional. The image digest. Cannot be specified if + tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for + the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. The + credentials to be able to pull the image. The value + will be encrypted on first submission. 
On following + submissions, the encrypted value should be used. * + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for + registries of type ``GHCR``. + "registry_type": + "str", # Optional. * DOCKER_HUB: The DockerHub + container registry type. * DOCR: The DigitalOcean + container registry type. * GHCR: The Github container + registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", + # Optional. The repository name. + "tag": "latest" # + Optional. Default value is "latest". The repository + tag. Defaults to ``latest`` if not provided and no + digest is provided. Cannot be specified if digest is + provided. + }, + "instance_count": 1, # + Optional. Default value is 1. The amount of instances + that this component should be scaled to. Default: 1. Must + not be set if autoscaling is used. + "instance_size_slug": {}, + "internal_ports": [ + 0 # Optional. The + ports on which this service will listen for internal + traffic. + ], + "liveness_health_check": { + "failure_threshold": + 0, # Optional. The number of failed health checks + before considered unhealthy. + "http_path": "str", + # Optional. The route path used for the HTTP health + check ping. If not set, the HTTP health check will be + disabled and a TCP health check used instead. + "initial_delay_seconds": 0, # Optional. The number + of seconds to wait before beginning health checks. + "period_seconds": 0, + # Optional. The number of seconds to wait between + health checks. + "port": 0, # + Optional. The port on which the health check will be + performed. + "success_threshold": + 0, # Optional. The number of successful health + checks before considered healthy. + "timeout_seconds": 0 + # Optional. The number of seconds after which the + check times out. + }, + "log_destinations": [ + { + "name": + "str", # Required. + "datadog": { + "api_key": "str", # Datadog API key. + Required. + "endpoint": "str" # Optional. 
Datadog HTTP + log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # Optional. Password + for user defined in User. Is required + when ``endpoint`` is set. Cannot be set + if using a DigitalOcean DBaaS OpenSearch + cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name + of a DigitalOcean DBaaS OpenSearch cluster to + use as a log forwarding destination. Cannot + be specified if ``endpoint`` is also + specified. + "endpoint": "str", # Optional. OpenSearch + API Endpoint. Only HTTPS is supported. + Format: + https://:code:``::code:``. Cannot + be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # Optional. Default + value is "logs". The index name to use for + the logs. If not set, the default index name + is "logs". + }, + "papertrail": + { + "endpoint": "str" # Papertrail syslog + endpoint. Required. + } + } + ], + "name": "str", # Optional. + The name. Must be unique across all components within the + same app. + "protocol": "str", # + Optional. The protocol which the service uses to serve + traffic on the http_port. * ``HTTP``"" : The app is + serving the HTTP protocol. Default. * ``HTTP2``"" : The + app is serving the HTTP/2 protocol. Currently, this needs + to be implemented in the service by serving HTTP/2 + cleartext (h2c). Known values are: "HTTP" and "HTTP2". + "routes": [ + { + "path": + "str", # Optional. (Deprecated - Use Ingress + Rules instead). An HTTP path prefix. Paths must + start with / and must be unique across all + components within an app. + "preserve_path_prefix": bool # Optional. An + optional flag to preserve the path that is + forwarded to the backend service. By default, the + HTTP request path will be trimmed from the left + when forwarded to the component. 
For example, a + component with ``path=/api`` will have requests + to ``/api/list`` trimmed to ``/list``. If this + value is ``true``"" , the path will remain + ``/api/list``. + } + ], + "run_command": "str", # + Optional. An optional run command to override the + component's default. + "source_dir": "str", # + Optional. An optional path to the working directory to + use for the build. For Dockerfile builds, this will be + used as the build context. Must be relative to the root + of the repo. + "termination": { + "drain_seconds": 0, + # Optional. The number of seconds to wait between + selecting a container instance for termination and + issuing the TERM signal. Selecting a container + instance for termination begins an asynchronous drain + of new requests on upstream load-balancers. (Default + 15). + "grace_period_seconds": 0 # Optional. The number of + seconds to wait between sending a TERM signal to a + container and issuing a KILL which causes immediate + shutdown. (Default 120). + } + } + ], + "static_sites": [ + { + "bitbucket": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "build_command": "str", # + Optional. An optional build command to run while building + this component from source. + "catchall_document": "str", + # Optional. The name of the document to use as the + fallback for any requests to documents that are not found + when serving this static site. Only 1 of + ``catchall_document`` or ``error_document`` can be set. + "cors": { + "allow_credentials": + bool, # Optional. Whether browsers should expose the + response to the client-side JavaScript code when the + request"u2019s credentials mode is include. This + configures the ``Access-Control-Allow-Credentials`` + header. 
+ "allow_headers": [ + "str" # + Optional. The set of allowed HTTP request + headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # + Optional. The set of allowed HTTP methods. This + configures the ``Access-Control-Allow-Methods`` + header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. Exact string + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "prefix": "str", # Optional. Prefix-based + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "regex": "str" # Optional. RE2 style + regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For + more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # + Optional. The set of HTTP response headers that + browsers are allowed to access. This configures + the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # + Optional. An optional duration specifying how long + browsers can cache the results of a preflight + request. This configures the + ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root + of the repo. If set, it will be used to build this + component. Otherwise, App Platform will attempt to build + it using buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", + # The variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value + is "RUN_AND_BUILD_TIME". * RUN_TIME: Made + available only at run-time * BUILD_TIME: Made + available only at build-time * + RUN_AND_BUILD_TIME: Made available at both build + and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and + "RUN_AND_BUILD_TIME". + "type": + "GENERAL", # Optional. 
Default value is + "GENERAL". * GENERAL: A plain-text environment + variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and + "SECRET". + "value": + "str" # Optional. The value. If the type is + ``SECRET``"" , the value will be encrypted on + first submission. On following submissions, the + encrypted value should be used. + } + ], + "error_document": "404.html", + # Optional. Default value is "404.html". The name of the + error document to use when serving this static site. + Default: 404.html. If no such file exists within the + built assets, App Platform will supply one. + "git": { + "branch": "str", # + Optional. The name of the branch to use. + "repo_clone_url": + "str" # Optional. The clone URL of the repo. + Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": + bool # Optional. Whether to automatically deploy + new images. Can only be used for images hosted in + DOCR and can only be used with an image tag, not + a specific digest. + }, + "digest": "str", # + Optional. The image digest. Cannot be specified if + tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for + the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. The + credentials to be able to pull the image. 
The value + will be encrypted on first submission. On following + submissions, the encrypted value should be used. * + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for + registries of type ``GHCR``. + "registry_type": + "str", # Optional. * DOCKER_HUB: The DockerHub + container registry type. * DOCR: The DigitalOcean + container registry type. * GHCR: The Github container + registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", + # Optional. The repository name. + "tag": "latest" # + Optional. Default value is "latest". The repository + tag. Defaults to ``latest`` if not provided and no + digest is provided. Cannot be specified if digest is + provided. + }, + "index_document": + "index.html", # Optional. Default value is "index.html". + The name of the index document to use when serving this + static site. Default: index.html. + "log_destinations": [ + { + "name": + "str", # Required. + "datadog": { + "api_key": "str", # Datadog API key. + Required. + "endpoint": "str" # Optional. Datadog HTTP + log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # Optional. Password + for user defined in User. Is required + when ``endpoint`` is set. Cannot be set + if using a DigitalOcean DBaaS OpenSearch + cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name + of a DigitalOcean DBaaS OpenSearch cluster to + use as a log forwarding destination. Cannot + be specified if ``endpoint`` is also + specified. + "endpoint": "str", # Optional. OpenSearch + API Endpoint. Only HTTPS is supported. + Format: + https://:code:``::code:``. Cannot + be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # Optional. 
Default + value is "logs". The index name to use for + the logs. If not set, the default index name + is "logs". + }, + "papertrail": + { + "endpoint": "str" # Papertrail syslog + endpoint. Required. + } + } + ], + "name": "str", # Optional. + The name. Must be unique across all components within the + same app. + "output_dir": "str", # + Optional. An optional path to where the built assets will + be located, relative to the build context. If not set, + App Platform will automatically scan for these directory + names: ``_static``"" , ``dist``"" , ``public``"" , + ``build``. + "routes": [ + { + "path": + "str", # Optional. (Deprecated - Use Ingress + Rules instead). An HTTP path prefix. Paths must + start with / and must be unique across all + components within an app. + "preserve_path_prefix": bool # Optional. An + optional flag to preserve the path that is + forwarded to the backend service. By default, the + HTTP request path will be trimmed from the left + when forwarded to the component. For example, a + component with ``path=/api`` will have requests + to ``/api/list`` trimmed to ``/list``. If this + value is ``true``"" , the path will remain + ``/api/list``. + } + ], + "run_command": "str", # + Optional. An optional run command to override the + component's default. + "source_dir": "str" # + Optional. An optional path to the working directory to + use for the build. For Dockerfile builds, this will be + used as the build context. Must be relative to the root + of the repo. + } + ], + "vpc": { + "egress_ips": [ + { + "ip": "str" # + Optional. The egress ips associated with the VPC. + } + ], + "id": "str" # Optional. The ID of + the VPC. + }, + "workers": [ + { + "autoscaling": { + "max_instance_count": + 0, # Optional. The maximum amount of instances for + this component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # Optional. Default value is + 80. The average target CPU utilization for + the component. 
+ } + }, + "min_instance_count": + 0 # Optional. The minimum amount of instances for + this component. Must be less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "build_command": "str", # + Optional. An optional build command to run while building + this component from source. + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root + of the repo. If set, it will be used to build this + component. Otherwise, App Platform will attempt to build + it using buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", + # The variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value + is "RUN_AND_BUILD_TIME". * RUN_TIME: Made + available only at run-time * BUILD_TIME: Made + available only at build-time * + RUN_AND_BUILD_TIME: Made available at both build + and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and + "RUN_AND_BUILD_TIME". + "type": + "GENERAL", # Optional. Default value is + "GENERAL". * GENERAL: A plain-text environment + variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and + "SECRET". + "value": + "str" # Optional. The value. If the type is + ``SECRET``"" , the value will be encrypted on + first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # + Optional. The name of the branch to use. + "repo_clone_url": + "str" # Optional. The clone URL of the repo. + Example: + ``https://github.com/digitalocean/sample-golang.git``. 
+ }, + "github": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": + bool # Optional. Whether to automatically deploy + new images. Can only be used for images hosted in + DOCR and can only be used with an image tag, not + a specific digest. + }, + "digest": "str", # + Optional. The image digest. Cannot be specified if + tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for + the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. The + credentials to be able to pull the image. The value + will be encrypted on first submission. On following + submissions, the encrypted value should be used. * + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for + registries of type ``GHCR``. + "registry_type": + "str", # Optional. * DOCKER_HUB: The DockerHub + container registry type. * DOCR: The DigitalOcean + container registry type. * GHCR: The Github container + registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", + # Optional. The repository name. + "tag": "latest" # + Optional. Default value is "latest". The repository + tag. Defaults to ``latest`` if not provided and no + digest is provided. Cannot be specified if digest is + provided. + }, + "instance_count": 1, # + Optional. Default value is 1. 
The amount of instances + that this component should be scaled to. Default: 1. Must + not be set if autoscaling is used. + "instance_size_slug": {}, + "liveness_health_check": { + "failure_threshold": + 0, # Optional. The number of failed health checks + before considered unhealthy. + "http_path": "str", + # Optional. The route path used for the HTTP health + check ping. If not set, the HTTP health check will be + disabled and a TCP health check used instead. + "initial_delay_seconds": 0, # Optional. The number + of seconds to wait before beginning health checks. + "period_seconds": 0, + # Optional. The number of seconds to wait between + health checks. + "port": 0, # + Optional. The port on which the health check will be + performed. + "success_threshold": + 0, # Optional. The number of successful health + checks before considered healthy. + "timeout_seconds": 0 + # Optional. The number of seconds after which the + check times out. + }, + "log_destinations": [ + { + "name": + "str", # Required. + "datadog": { + "api_key": "str", # Datadog API key. + Required. + "endpoint": "str" # Optional. Datadog HTTP + log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # Optional. Password + for user defined in User. Is required + when ``endpoint`` is set. Cannot be set + if using a DigitalOcean DBaaS OpenSearch + cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name + of a DigitalOcean DBaaS OpenSearch cluster to + use as a log forwarding destination. Cannot + be specified if ``endpoint`` is also + specified. + "endpoint": "str", # Optional. OpenSearch + API Endpoint. Only HTTPS is supported. + Format: + https://:code:``::code:``. Cannot + be specified if ``cluster_name`` is also + specified. 
+ "index_name": "logs" # Optional. Default + value is "logs". The index name to use for + the logs. If not set, the default index name + is "logs". + }, + "papertrail": + { + "endpoint": "str" # Papertrail syslog + endpoint. Required. + } + } + ], + "name": "str", # Optional. + The name. Must be unique across all components within the + same app. + "run_command": "str", # + Optional. An optional run command to override the + component's default. + "source_dir": "str", # + Optional. An optional path to the working directory to + use for the build. For Dockerfile builds, this will be + used as the build context. Must be relative to the root + of the repo. + "termination": { + "grace_period_seconds": 0 # Optional. The number of + seconds to wait between sending a TERM signal to a + container and issuing a KILL which causes immediate + shutdown. (Default 120). + } + } + ] + }, + "static_sites": [ + { + "name": "str", # Optional. The name + of this static site. + "source_commit_hash": "str" # + Optional. The commit hash of the repository that was used to + build this static site. + } + ], + "tier_slug": "str", # Optional. The current pricing + tier slug of the deployment. + "updated_at": "2020-02-20 00:00:00", # Optional. + When the deployment was last updated. + "workers": [ + { + "name": "str", # Optional. The name + of this worker. + "source_commit_hash": "str" # + Optional. The commit hash of the repository that was used to + build this worker. + } + ] + }, + "deployment_id": "str", # Optional. For deployment events, + this is the same as the deployment's ID. For autoscaling events, this is + the deployment that was autoscaled. + "id": "str", # Optional. The ID of the event (UUID). + "type": "str" # Optional. The type of event. Known values + are: "UNKNOWN", "DEPLOYMENT", and "AUTOSCALING". 
+ } + ], + "links": { + "pages": {} + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_apps_list_events_request( + app_id=app_id, + page=page, + per_page=per_page, + event_types=event_types, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", 
response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def get_event(self, app_id: str, event_id: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Get an Event. + + Get a single event for an app. + + :param app_id: The app ID. Required. + :type app_id: str + :param event_id: The event ID. Required. + :type event_id: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "event": { + "autoscaling": { + "components": { + "str": { + "from": 0, # Optional. The number of + replicas before scaling. + "to": 0, # Optional. The number of replicas + after scaling. + "triggering_metric": "str" # Optional. The + metric that triggered the scale change. Known values are "cpu", + "requests_per_second", "request_duration". For inactivity sleep, + "scale_from_zero" and "scale_to_zero" are used. + } + }, + "phase": "str" # Optional. The current phase of the + autoscaling event. 
Known values are: "UNKNOWN", "PENDING", "IN_PROGRESS", + "SUCCEEDED", "FAILED", and "CANCELED". + }, + "created_at": "2020-02-20 00:00:00", # Optional. When the event was + created. + "deployment": { + "cause": "str", # Optional. What caused this deployment to + be created. + "cloned_from": "str", # Optional. The ID of a previous + deployment that this deployment was cloned from. + "created_at": "2020-02-20 00:00:00", # Optional. The + creation time of the deployment. + "functions": [ + { + "name": "str", # Optional. The name of this + functions component. + "namespace": "str", # Optional. The + namespace where the functions are deployed. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this + functions component. + } + ], + "id": "str", # Optional. The ID of the deployment. + "jobs": [ + { + "name": "str", # Optional. The name of this + job. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this job. + } + ], + "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". + Known values are: "UNKNOWN", "PENDING_BUILD", "BUILDING", + "PENDING_DEPLOY", "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and + "CANCELED". + "phase_last_updated_at": "2020-02-20 00:00:00", # Optional. + When the deployment phase was last updated. + "progress": { + "error_steps": 0, # Optional. Number of unsuccessful + steps. + "pending_steps": 0, # Optional. Number of pending + steps. + "running_steps": 0, # Optional. Number of currently + running steps. + "steps": [ + { + "component_name": "str", # Optional. + The component name that this step is associated with. + "ended_at": "2020-02-20 00:00:00", # + Optional. The end time of this step. + "message_base": "str", # Optional. + The base of a human-readable description of the step intended + to be combined with the component name for presentation. For + example: ``message_base`` = "Building service" + ``component_name`` = "api". 
+ "name": "str", # Optional. The name + of this step. + "reason": { + "code": "str", # Optional. + The error code. + "message": "str" # Optional. + The error message. + }, + "started_at": "2020-02-20 00:00:00", + # Optional. The start time of this step. + "status": "UNKNOWN", # Optional. + Default value is "UNKNOWN". Known values are: "UNKNOWN", + "PENDING", "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. Child steps + of this step. + ] + } + ], + "success_steps": 0, # Optional. Number of successful + steps. + "summary_steps": [ + { + "component_name": "str", # Optional. + The component name that this step is associated with. + "ended_at": "2020-02-20 00:00:00", # + Optional. The end time of this step. + "message_base": "str", # Optional. + The base of a human-readable description of the step intended + to be combined with the component name for presentation. For + example: ``message_base`` = "Building service" + ``component_name`` = "api". + "name": "str", # Optional. The name + of this step. + "reason": { + "code": "str", # Optional. + The error code. + "message": "str" # Optional. + The error message. + }, + "started_at": "2020-02-20 00:00:00", + # Optional. The start time of this step. + "status": "UNKNOWN", # Optional. + Default value is "UNKNOWN". Known values are: "UNKNOWN", + "PENDING", "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. Child steps + of this step. + ] + } + ], + "total_steps": 0 # Optional. Total number of steps. + }, + "services": [ + { + "name": "str", # Optional. The name of this + service. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this + service. + } + ], + "spec": { + "name": "str", # The name of the app. Must be unique + across all apps in the same account. Required. + "databases": [ + { + "name": "str", # The database's + name. The name must be unique across all components within + the same app and cannot use capital letters. Required. 
+ "cluster_name": "str", # Optional. + The name of the underlying DigitalOcean DBaaS cluster. This + is required for production databases. For dev databases, if + cluster_name is not set, a new cluster will be provisioned. + "db_name": "str", # Optional. The + name of the MySQL or PostgreSQL database to configure. + "db_user": "str", # Optional. The + name of the MySQL or PostgreSQL user to configure. + "engine": "UNSET", # Optional. + Default value is "UNSET". * MYSQL: MySQL * PG: PostgreSQL * + REDIS: Caching * MONGODB: MongoDB * KAFKA: Kafka * + OPENSEARCH: OpenSearch * VALKEY: ValKey. Known values are: + "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", "KAFKA", + "OPENSEARCH", and "VALKEY". + "production": bool, # Optional. + Whether this is a production or dev database. + "version": "str" # Optional. The + version of the database engine. + } + ], + "disable_edge_cache": False, # Optional. Default + value is False. .. role:: raw-html-m2r(raw) :format: html If set + to ``true``"" , the app will **not** be cached at the edge (CDN). + Enable this option if you want to manage CDN configuration + yourself"u2014whether by using an external CDN provider or by + handling static content and caching within your app. This setting is + also recommended for apps that require real-time data or serve + dynamic content, such as those using Server-Sent Events (SSE) over + GET, or hosting an MCP (Model Context Protocol) Server that utilizes + SSE."" :raw-html-m2r:`
` **Note:** This feature is not available + for static site components."" :raw-html-m2r:`
` For more + information, see `Disable CDN Cache + `_. + "disable_email_obfuscation": False, # Optional. + Default value is False. If set to ``true``"" , email addresses in the + app will not be obfuscated. This is useful for apps that require + email addresses to be visible (in the HTML markup). + "domains": [ + { + "domain": "str", # The hostname for + the domain. Required. + "minimum_tls_version": "str", # + Optional. The minimum version of TLS a client application can + use to access resources for the domain. Must be one of the + following values wrapped within quotations: ``"1.2"`` or + ``"1.3"``. Known values are: "1.2" and "1.3". + "type": "UNSPECIFIED", # Optional. + Default value is "UNSPECIFIED". * DEFAULT: The default + ``.ondigitalocean.app`` domain assigned to this app * + PRIMARY: The primary domain for this app that is displayed as + the default in the control panel, used in bindable + environment variables, and any other places that reference an + app's live URL. Only one domain may be set as primary. * + ALIAS: A non-primary domain. Known values are: "UNSPECIFIED", + "DEFAULT", "PRIMARY", and "ALIAS". + "wildcard": bool, # Optional. + Indicates whether the domain includes all sub-domains, in + addition to the given domain. + "zone": "str" # Optional. Optional. + If the domain uses DigitalOcean DNS and you would like App + Platform to automatically manage it for you, set this to the + name of the domain on your account. For example, If the + domain you are adding is ``app.domain.com``"" , the zone + could be ``domain.com``. + } + ], + "egress": { + "type": "AUTOASSIGN" # Optional. Default + value is "AUTOASSIGN". The app egress type. Known values are: + "AUTOASSIGN" and "DEDICATED_IP". + }, + "enhanced_threat_control_enabled": False, # + Optional. Default value is False. If set to ``true``"" , suspicious + requests will go through additional security checks to help mitigate + layer 7 DDoS attacks. + "functions": [ + { + "name": "str", # The name. 
Must be + unique across all components within the same app. Required. + "alerts": [ + { + "disabled": bool, # + Optional. Is the alert disabled?. + "operator": + "UNSPECIFIED_OPERATOR", # Optional. Default value is + "UNSPECIFIED_OPERATOR". Known values are: + "UNSPECIFIED_OPERATOR", "GREATER_THAN", and + "LESS_THAN". + "rule": + "UNSPECIFIED_RULE", # Optional. Default value is + "UNSPECIFIED_RULE". Known values are: + "UNSPECIFIED_RULE", "CPU_UTILIZATION", + "MEM_UTILIZATION", "RESTART_COUNT", + "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", + "DOMAIN_FAILED", "DOMAIN_LIVE", "AUTOSCALE_FAILED", + "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT", + "FUNCTIONS_AVERAGE_DURATION_MS", + "FUNCTIONS_ERROR_RATE_PER_MINUTE", + "FUNCTIONS_AVERAGE_WAIT_TIME_MS", + "FUNCTIONS_ERROR_COUNT", and + "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, # + Optional. Threshold value for alert. + "window": + "UNSPECIFIED_WINDOW" # Optional. Default value is + "UNSPECIFIED_WINDOW". Known values are: + "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", + "THIRTY_MINUTES", and "ONE_HOUR". + } + ], + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to + the client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. + The set of allowed HTTP request headers. This + configures the ``Access-Control-Allow-Headers`` + header. + ], + "allow_methods": [ + "str" # Optional. + The set of allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. 
+ ], + "allow_origins": [ + { + "exact": + "str", # Optional. Exact string match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "prefix": + "str", # Optional. Prefix-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "regex": + "str" # Optional. RE2 style regex-based match. + Only 1 of ``exact``"" , ``prefix``"" , or + ``regex`` must be set. For more information about + RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. + The set of HTTP response headers that browsers are + allowed to access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. + An optional duration specifying how long browsers can + cache the results of a preflight request. This configures + the ``Access-Control-Max-Age`` header. + }, + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. 
+ "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "routes": [ + { + "path": "str", # + Optional. (Deprecated - Use Ingress Rules instead). + An HTTP path prefix. Paths must start with / and must + be unique across all components within an app. 
+ "preserve_path_prefix": bool # Optional. An optional + flag to preserve the path that is forwarded to the + backend service. By default, the HTTP request path + will be trimmed from the left when forwarded to the + component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. + } + ], + "source_dir": "str" # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + } + ], + "ingress": { + "rules": [ + { + "component": { + "name": "str", # The + name of the component to route to. Required. + "preserve_path_prefix": "str", # Optional. An + optional flag to preserve the path that is forwarded + to the backend service. By default, the HTTP request + path will be trimmed from the left when forwarded to + the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. Note: this is not + applicable for Functions Components and is mutually + exclusive with ``rewrite``. + "rewrite": "str" # + Optional. An optional field that will rewrite the + path of the component to be what is specified here. + By default, the HTTP request path will be trimmed + from the left when forwarded to the component. For + example, a component with ``path=/api`` will have + requests to ``/api/list`` trimmed to ``/list``. If + you specified the rewrite to be ``/v1/``"" , requests + to ``/api/list`` would be rewritten to ``/v1/list``. + Note: this is mutually exclusive with + ``preserve_path_prefix``. + }, + "cors": { + "allow_credentials": + bool, # Optional. Whether browsers should expose the + response to the client-side JavaScript code when the + request"u2019s credentials mode is include. 
This + configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # + Optional. The set of allowed HTTP request + headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # + Optional. The set of allowed HTTP methods. This + configures the ``Access-Control-Allow-Methods`` + header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. Exact string + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "prefix": "str", # Optional. Prefix-based + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "regex": "str" # Optional. RE2 style + regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For + more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # + Optional. The set of HTTP response headers that + browsers are allowed to access. This configures + the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # + Optional. An optional duration specifying how long + browsers can cache the results of a preflight + request. This configures the + ``Access-Control-Max-Age`` header. + }, + "match": { + "authority": { + "exact": + "str" # Required. + }, + "path": { + "prefix": + "str" # Prefix-based match. For example, + ``/api`` will match ``/api``"" , ``/api/``"" , + and any nested paths such as + ``/api/v1/endpoint``. Required. + } + }, + "redirect": { + "authority": "str", + # Optional. The authority/host to redirect to. This + can be a hostname or IP address. Note: use ``port`` + to set the port. + "port": 0, # + Optional. The port to redirect to. + "redirect_code": 0, + # Optional. The redirect code to use. Defaults to + ``302``. Supported values are 300, 301, 302, 303, + 304, 307, 308. + "scheme": "str", # + Optional. The scheme to redirect to. Supported values + are ``http`` or ``https``. Default: ``https``. 
+ "uri": "str" # + Optional. An optional URI path to redirect to. Note: + if this is specified the whole URI of the original + request will be overwritten to this value, + irrespective of the original request URI being + matched. + } + } + ] + }, + "jobs": [ + { + "autoscaling": { + "max_instance_count": 0, # + Optional. The maximum amount of instances for this + component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 + # Optional. Default value is 80. The average + target CPU utilization for the component. + } + }, + "min_instance_count": 0 # + Optional. The minimum amount of instances for this + component. Must be less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". 
* GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. + }, + "digest": "str", # Optional. + The image digest. Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * + "$username:$access_token" for registries of type + ``GHCR``. + "registry_type": "str", # + Optional. 
* DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. + }, + "instance_count": 1, # Optional. + Default value is 1. The amount of instances that this + component should be scaled to. Default: 1. Must not be set if + autoscaling is used. + "instance_size_slug": {}, + "kind": "UNSPECIFIED", # Optional. + Default value is "UNSPECIFIED". * UNSPECIFIED: Default job + type, will auto-complete to POST_DEPLOY kind. * PRE_DEPLOY: + Indicates a job that runs before an app deployment. * + POST_DEPLOY: Indicates a job that runs after an app + deployment. * FAILED_DEPLOY: Indicates a job that runs after + a component fails to deploy. Known values are: "UNSPECIFIED", + "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY". + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. 
OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. + Must be unique across all components within the same app. + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str", # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + "termination": { + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which + causes immediate shutdown. (Default 120). + } + } + ], + "maintenance": { + "archive": bool, # Optional. Indicates + whether the app should be archived. Setting this to true implies + that enabled is set to true. + "enabled": bool, # Optional. Indicates + whether maintenance mode should be enabled for the app. + "offline_page_url": "str" # Optional. A + custom offline page to display when maintenance mode is enabled + or the app is archived. + }, + "region": "str", # Optional. The slug form of the + geographical origin of the app. Default: ``nearest available``. Known + values are: "atl", "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", + "sgp", and "syd". + "services": [ + { + "autoscaling": { + "max_instance_count": 0, # + Optional. The maximum amount of instances for this + component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 + # Optional. Default value is 80. The average + target CPU utilization for the component. + } + }, + "min_instance_count": 0 # + Optional. 
The minimum amount of instances for this + component. Must be less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to + the client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. + The set of allowed HTTP request headers. This + configures the ``Access-Control-Allow-Headers`` + header. + ], + "allow_methods": [ + "str" # Optional. + The set of allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": + "str", # Optional. Exact string match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "prefix": + "str", # Optional. Prefix-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "regex": + "str" # Optional. RE2 style regex-based match. + Only 1 of ``exact``"" , ``prefix``"" , or + ``regex`` must be set. For more information about + RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. + The set of HTTP response headers that browsers are + allowed to access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. + An optional duration specifying how long browsers can + cache the results of a preflight request. This configures + the ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # + Optional. 
The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. 
+ }, + "health_check": { + "failure_threshold": 0, # + Optional. The number of failed health checks before + considered unhealthy. + "http_path": "str", # + Optional. The route path used for the HTTP health check + ping. If not set, the HTTP health check will be disabled + and a TCP health check used instead. + "initial_delay_seconds": 0, + # Optional. The number of seconds to wait before + beginning health checks. + "period_seconds": 0, # + Optional. The number of seconds to wait between health + checks. + "port": 0, # Optional. The + port on which the health check will be performed. If not + set, the health check will be performed on the + component's http_port. + "success_threshold": 0, # + Optional. The number of successful health checks before + considered healthy. + "timeout_seconds": 0 # + Optional. The number of seconds after which the check + times out. + }, + "http_port": 0, # Optional. The + internal port on which this service's run command will + listen. Default: 8080 If there is not an environment variable + with the name ``PORT``"" , one will be automatically added + with its value set to the value of this field. + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. + }, + "digest": "str", # Optional. + The image digest. Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * + "$username:$access_token" for registries of type + ``GHCR``. + "registry_type": "str", # + Optional. 
* DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. + }, + "instance_count": 1, # Optional. + Default value is 1. The amount of instances that this + component should be scaled to. Default: 1. Must not be set if + autoscaling is used. + "instance_size_slug": {}, + "internal_ports": [ + 0 # Optional. The ports on + which this service will listen for internal traffic. + ], + "liveness_health_check": { + "failure_threshold": 0, # + Optional. The number of failed health checks before + considered unhealthy. + "http_path": "str", # + Optional. The route path used for the HTTP health check + ping. If not set, the HTTP health check will be disabled + and a TCP health check used instead. + "initial_delay_seconds": 0, + # Optional. The number of seconds to wait before + beginning health checks. + "period_seconds": 0, # + Optional. The number of seconds to wait between health + checks. + "port": 0, # Optional. The + port on which the health check will be performed. + "success_threshold": 0, # + Optional. The number of successful health checks before + considered healthy. + "timeout_seconds": 0 # + Optional. The number of seconds after which the check + times out. + }, + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. 
Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. + Must be unique across all components within the same app. + "protocol": "str", # Optional. The + protocol which the service uses to serve traffic on the + http_port. * ``HTTP``"" : The app is serving the HTTP + protocol. Default. * ``HTTP2``"" : The app is serving the + HTTP/2 protocol. Currently, this needs to be implemented in + the service by serving HTTP/2 cleartext (h2c). Known values + are: "HTTP" and "HTTP2". + "routes": [ + { + "path": "str", # + Optional. (Deprecated - Use Ingress Rules instead). + An HTTP path prefix. Paths must start with / and must + be unique across all components within an app. + "preserve_path_prefix": bool # Optional. An optional + flag to preserve the path that is forwarded to the + backend service. By default, the HTTP request path + will be trimmed from the left when forwarded to the + component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. + } + ], + "run_command": "str", # Optional. 
An + optional run command to override the component's default. + "source_dir": "str", # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + "termination": { + "drain_seconds": 0, # + Optional. The number of seconds to wait between selecting + a container instance for termination and issuing the TERM + signal. Selecting a container instance for termination + begins an asynchronous drain of new requests on upstream + load-balancers. (Default 15). + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which + causes immediate shutdown. (Default 120). + } + } + ], + "static_sites": [ + { + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. + "catchall_document": "str", # + Optional. The name of the document to use as the fallback for + any requests to documents that are not found when serving + this static site. Only 1 of ``catchall_document`` or + ``error_document`` can be set. + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to + the client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. + The set of allowed HTTP request headers. This + configures the ``Access-Control-Allow-Headers`` + header. + ], + "allow_methods": [ + "str" # Optional. + The set of allowed HTTP methods. 
This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": + "str", # Optional. Exact string match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "prefix": + "str", # Optional. Prefix-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "regex": + "str" # Optional. RE2 style regex-based match. + Only 1 of ``exact``"" , ``prefix``"" , or + ``regex`` must be set. For more information about + RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. + The set of HTTP response headers that browsers are + allowed to access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. + An optional duration specifying how long browsers can + cache the results of a preflight request. This configures + the ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. 
If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "error_document": "404.html", # + Optional. Default value is "404.html". The name of the error + document to use when serving this static site. Default: + 404.html. If no such file exists within the built assets, App + Platform will supply one. + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. + }, + "digest": "str", # Optional. + The image digest. Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * + "$username:$access_token" for registries of type + ``GHCR``. 
+ "registry_type": "str", # + Optional. * DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. + }, + "index_document": "index.html", # + Optional. Default value is "index.html". The name of the + index document to use when serving this static site. Default: + index.html. + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. 
+ Must be unique across all components within the same app. + "output_dir": "str", # Optional. An + optional path to where the built assets will be located, + relative to the build context. If not set, App Platform will + automatically scan for these directory names: ``_static``"" , + ``dist``"" , ``public``"" , ``build``. + "routes": [ + { + "path": "str", # + Optional. (Deprecated - Use Ingress Rules instead). + An HTTP path prefix. Paths must start with / and must + be unique across all components within an app. + "preserve_path_prefix": bool # Optional. An optional + flag to preserve the path that is forwarded to the + backend service. By default, the HTTP request path + will be trimmed from the left when forwarded to the + component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. + } + ], + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str" # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + } + ], + "vpc": { + "egress_ips": [ + { + "ip": "str" # Optional. The + egress ips associated with the VPC. + } + ], + "id": "str" # Optional. The ID of the VPC. + }, + "workers": [ + { + "autoscaling": { + "max_instance_count": 0, # + Optional. The maximum amount of instances for this + component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 + # Optional. Default value is 80. The average + target CPU utilization for the component. + } + }, + "min_instance_count": 0 # + Optional. The minimum amount of instances for this + component. Must be less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. 
Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. 
Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. + }, + "digest": "str", # Optional. + The image digest. Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * + "$username:$access_token" for registries of type + ``GHCR``. + "registry_type": "str", # + Optional. * DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. + }, + "instance_count": 1, # Optional. + Default value is 1. The amount of instances that this + component should be scaled to. Default: 1. Must not be set if + autoscaling is used. + "instance_size_slug": {}, + "liveness_health_check": { + "failure_threshold": 0, # + Optional. The number of failed health checks before + considered unhealthy. 
+ "http_path": "str", # + Optional. The route path used for the HTTP health check + ping. If not set, the HTTP health check will be disabled + and a TCP health check used instead. + "initial_delay_seconds": 0, + # Optional. The number of seconds to wait before + beginning health checks. + "period_seconds": 0, # + Optional. The number of seconds to wait between health + checks. + "port": 0, # Optional. The + port on which the health check will be performed. + "success_threshold": 0, # + Optional. The number of successful health checks before + considered healthy. + "timeout_seconds": 0 # + Optional. The number of seconds after which the check + times out. + }, + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. 
+ Must be unique across all components within the same app. + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str", # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + "termination": { + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which + causes immediate shutdown. (Default 120). + } + } + ] + }, + "static_sites": [ + { + "name": "str", # Optional. The name of this + static site. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this static + site. + } + ], + "tier_slug": "str", # Optional. The current pricing tier + slug of the deployment. + "updated_at": "2020-02-20 00:00:00", # Optional. When the + deployment was last updated. + "workers": [ + { + "name": "str", # Optional. The name of this + worker. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this worker. + } + ] + }, + "deployment_id": "str", # Optional. For deployment events, this is + the same as the deployment's ID. For autoscaling events, this is the + deployment that was autoscaled. + "id": "str", # Optional. The ID of the event (UUID). + "type": "str" # Optional. The type of event. Known values are: + "UNKNOWN", "DEPLOYMENT", and "AUTOSCALING". + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_apps_get_event_request( + app_id=app_id, + event_id=event_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) 
+ response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def cancel_event(self, app_id: str, event_id: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Cancel an Event. + + Cancel an in-progress autoscaling event. + + :param app_id: The app ID. Required. + :type app_id: str + :param event_id: The event ID. Required. + :type event_id: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "event": { + "autoscaling": { + "components": { + "str": { + "from": 0, # Optional. The number of + replicas before scaling. + "to": 0, # Optional. The number of replicas + after scaling. + "triggering_metric": "str" # Optional. The + metric that triggered the scale change. Known values are "cpu", + "requests_per_second", "request_duration". For inactivity sleep, + "scale_from_zero" and "scale_to_zero" are used. + } + }, + "phase": "str" # Optional. The current phase of the + autoscaling event. Known values are: "UNKNOWN", "PENDING", "IN_PROGRESS", + "SUCCEEDED", "FAILED", and "CANCELED". + }, + "created_at": "2020-02-20 00:00:00", # Optional. When the event was + created. + "deployment": { + "cause": "str", # Optional. What caused this deployment to + be created. + "cloned_from": "str", # Optional. The ID of a previous + deployment that this deployment was cloned from. + "created_at": "2020-02-20 00:00:00", # Optional. The + creation time of the deployment. 
+ "functions": [ + { + "name": "str", # Optional. The name of this + functions component. + "namespace": "str", # Optional. The + namespace where the functions are deployed. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this + functions component. + } + ], + "id": "str", # Optional. The ID of the deployment. + "jobs": [ + { + "name": "str", # Optional. The name of this + job. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this job. + } + ], + "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". + Known values are: "UNKNOWN", "PENDING_BUILD", "BUILDING", + "PENDING_DEPLOY", "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and + "CANCELED". + "phase_last_updated_at": "2020-02-20 00:00:00", # Optional. + When the deployment phase was last updated. + "progress": { + "error_steps": 0, # Optional. Number of unsuccessful + steps. + "pending_steps": 0, # Optional. Number of pending + steps. + "running_steps": 0, # Optional. Number of currently + running steps. + "steps": [ + { + "component_name": "str", # Optional. + The component name that this step is associated with. + "ended_at": "2020-02-20 00:00:00", # + Optional. The end time of this step. + "message_base": "str", # Optional. + The base of a human-readable description of the step intended + to be combined with the component name for presentation. For + example: ``message_base`` = "Building service" + ``component_name`` = "api". + "name": "str", # Optional. The name + of this step. + "reason": { + "code": "str", # Optional. + The error code. + "message": "str" # Optional. + The error message. + }, + "started_at": "2020-02-20 00:00:00", + # Optional. The start time of this step. + "status": "UNKNOWN", # Optional. + Default value is "UNKNOWN". Known values are: "UNKNOWN", + "PENDING", "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. Child steps + of this step. 
+ ] + } + ], + "success_steps": 0, # Optional. Number of successful + steps. + "summary_steps": [ + { + "component_name": "str", # Optional. + The component name that this step is associated with. + "ended_at": "2020-02-20 00:00:00", # + Optional. The end time of this step. + "message_base": "str", # Optional. + The base of a human-readable description of the step intended + to be combined with the component name for presentation. For + example: ``message_base`` = "Building service" + ``component_name`` = "api". + "name": "str", # Optional. The name + of this step. + "reason": { + "code": "str", # Optional. + The error code. + "message": "str" # Optional. + The error message. + }, + "started_at": "2020-02-20 00:00:00", + # Optional. The start time of this step. + "status": "UNKNOWN", # Optional. + Default value is "UNKNOWN". Known values are: "UNKNOWN", + "PENDING", "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. Child steps + of this step. + ] + } + ], + "total_steps": 0 # Optional. Total number of steps. + }, + "services": [ + { + "name": "str", # Optional. The name of this + service. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this + service. + } + ], + "spec": { + "name": "str", # The name of the app. Must be unique + across all apps in the same account. Required. + "databases": [ + { + "name": "str", # The database's + name. The name must be unique across all components within + the same app and cannot use capital letters. Required. + "cluster_name": "str", # Optional. + The name of the underlying DigitalOcean DBaaS cluster. This + is required for production databases. For dev databases, if + cluster_name is not set, a new cluster will be provisioned. + "db_name": "str", # Optional. The + name of the MySQL or PostgreSQL database to configure. + "db_user": "str", # Optional. The + name of the MySQL or PostgreSQL user to configure. + "engine": "UNSET", # Optional. 
+ Default value is "UNSET". * MYSQL: MySQL * PG: PostgreSQL * + REDIS: Caching * MONGODB: MongoDB * KAFKA: Kafka * + OPENSEARCH: OpenSearch * VALKEY: ValKey. Known values are: + "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", "KAFKA", + "OPENSEARCH", and "VALKEY". + "production": bool, # Optional. + Whether this is a production or dev database. + "version": "str" # Optional. The + version of the database engine. + } + ], + "disable_edge_cache": False, # Optional. Default + value is False. .. role:: raw-html-m2r(raw) :format: html If set + to ``true``"" , the app will **not** be cached at the edge (CDN). + Enable this option if you want to manage CDN configuration + yourself"u2014whether by using an external CDN provider or by + handling static content and caching within your app. This setting is + also recommended for apps that require real-time data or serve + dynamic content, such as those using Server-Sent Events (SSE) over + GET, or hosting an MCP (Model Context Protocol) Server that utilizes + SSE."" :raw-html-m2r:`
` **Note:** This feature is not available + for static site components."" :raw-html-m2r:`
` For more + information, see `Disable CDN Cache + `_. + "disable_email_obfuscation": False, # Optional. + Default value is False. If set to ``true``"" , email addresses in the + app will not be obfuscated. This is useful for apps that require + email addresses to be visible (in the HTML markup). + "domains": [ + { + "domain": "str", # The hostname for + the domain. Required. + "minimum_tls_version": "str", # + Optional. The minimum version of TLS a client application can + use to access resources for the domain. Must be one of the + following values wrapped within quotations: ``"1.2"`` or + ``"1.3"``. Known values are: "1.2" and "1.3". + "type": "UNSPECIFIED", # Optional. + Default value is "UNSPECIFIED". * DEFAULT: The default + ``.ondigitalocean.app`` domain assigned to this app * + PRIMARY: The primary domain for this app that is displayed as + the default in the control panel, used in bindable + environment variables, and any other places that reference an + app's live URL. Only one domain may be set as primary. * + ALIAS: A non-primary domain. Known values are: "UNSPECIFIED", + "DEFAULT", "PRIMARY", and "ALIAS". + "wildcard": bool, # Optional. + Indicates whether the domain includes all sub-domains, in + addition to the given domain. + "zone": "str" # Optional. Optional. + If the domain uses DigitalOcean DNS and you would like App + Platform to automatically manage it for you, set this to the + name of the domain on your account. For example, If the + domain you are adding is ``app.domain.com``"" , the zone + could be ``domain.com``. + } + ], + "egress": { + "type": "AUTOASSIGN" # Optional. Default + value is "AUTOASSIGN". The app egress type. Known values are: + "AUTOASSIGN" and "DEDICATED_IP". + }, + "enhanced_threat_control_enabled": False, # + Optional. Default value is False. If set to ``true``"" , suspicious + requests will go through additional security checks to help mitigate + layer 7 DDoS attacks. + "functions": [ + { + "name": "str", # The name. 
Must be + unique across all components within the same app. Required. + "alerts": [ + { + "disabled": bool, # + Optional. Is the alert disabled?. + "operator": + "UNSPECIFIED_OPERATOR", # Optional. Default value is + "UNSPECIFIED_OPERATOR". Known values are: + "UNSPECIFIED_OPERATOR", "GREATER_THAN", and + "LESS_THAN". + "rule": + "UNSPECIFIED_RULE", # Optional. Default value is + "UNSPECIFIED_RULE". Known values are: + "UNSPECIFIED_RULE", "CPU_UTILIZATION", + "MEM_UTILIZATION", "RESTART_COUNT", + "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", + "DOMAIN_FAILED", "DOMAIN_LIVE", "AUTOSCALE_FAILED", + "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT", + "FUNCTIONS_AVERAGE_DURATION_MS", + "FUNCTIONS_ERROR_RATE_PER_MINUTE", + "FUNCTIONS_AVERAGE_WAIT_TIME_MS", + "FUNCTIONS_ERROR_COUNT", and + "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, # + Optional. Threshold value for alert. + "window": + "UNSPECIFIED_WINDOW" # Optional. Default value is + "UNSPECIFIED_WINDOW". Known values are: + "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", + "THIRTY_MINUTES", and "ONE_HOUR". + } + ], + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to + the client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. + The set of allowed HTTP request headers. This + configures the ``Access-Control-Allow-Headers`` + header. + ], + "allow_methods": [ + "str" # Optional. + The set of allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. 
+ ], + "allow_origins": [ + { + "exact": + "str", # Optional. Exact string match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "prefix": + "str", # Optional. Prefix-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "regex": + "str" # Optional. RE2 style regex-based match. + Only 1 of ``exact``"" , ``prefix``"" , or + ``regex`` must be set. For more information about + RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. + The set of HTTP response headers that browsers are + allowed to access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. + An optional duration specifying how long browsers can + cache the results of a preflight request. This configures + the ``Access-Control-Max-Age`` header. + }, + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. 
+ "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "routes": [ + { + "path": "str", # + Optional. (Deprecated - Use Ingress Rules instead). + An HTTP path prefix. Paths must start with / and must + be unique across all components within an app. 
+ "preserve_path_prefix": bool # Optional. An optional + flag to preserve the path that is forwarded to the + backend service. By default, the HTTP request path + will be trimmed from the left when forwarded to the + component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. + } + ], + "source_dir": "str" # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + } + ], + "ingress": { + "rules": [ + { + "component": { + "name": "str", # The + name of the component to route to. Required. + "preserve_path_prefix": "str", # Optional. An + optional flag to preserve the path that is forwarded + to the backend service. By default, the HTTP request + path will be trimmed from the left when forwarded to + the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. Note: this is not + applicable for Functions Components and is mutually + exclusive with ``rewrite``. + "rewrite": "str" # + Optional. An optional field that will rewrite the + path of the component to be what is specified here. + By default, the HTTP request path will be trimmed + from the left when forwarded to the component. For + example, a component with ``path=/api`` will have + requests to ``/api/list`` trimmed to ``/list``. If + you specified the rewrite to be ``/v1/``"" , requests + to ``/api/list`` would be rewritten to ``/v1/list``. + Note: this is mutually exclusive with + ``preserve_path_prefix``. + }, + "cors": { + "allow_credentials": + bool, # Optional. Whether browsers should expose the + response to the client-side JavaScript code when the + request"u2019s credentials mode is include. 
This + configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # + Optional. The set of allowed HTTP request + headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # + Optional. The set of allowed HTTP methods. This + configures the ``Access-Control-Allow-Methods`` + header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. Exact string + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "prefix": "str", # Optional. Prefix-based + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "regex": "str" # Optional. RE2 style + regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For + more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # + Optional. The set of HTTP response headers that + browsers are allowed to access. This configures + the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # + Optional. An optional duration specifying how long + browsers can cache the results of a preflight + request. This configures the + ``Access-Control-Max-Age`` header. + }, + "match": { + "authority": { + "exact": + "str" # Required. + }, + "path": { + "prefix": + "str" # Prefix-based match. For example, + ``/api`` will match ``/api``"" , ``/api/``"" , + and any nested paths such as + ``/api/v1/endpoint``. Required. + } + }, + "redirect": { + "authority": "str", + # Optional. The authority/host to redirect to. This + can be a hostname or IP address. Note: use ``port`` + to set the port. + "port": 0, # + Optional. The port to redirect to. + "redirect_code": 0, + # Optional. The redirect code to use. Defaults to + ``302``. Supported values are 300, 301, 302, 303, + 304, 307, 308. + "scheme": "str", # + Optional. The scheme to redirect to. Supported values + are ``http`` or ``https``. Default: ``https``. 
+ "uri": "str" # + Optional. An optional URI path to redirect to. Note: + if this is specified the whole URI of the original + request will be overwritten to this value, + irrespective of the original request URI being + matched. + } + } + ] + }, + "jobs": [ + { + "autoscaling": { + "max_instance_count": 0, # + Optional. The maximum amount of instances for this + component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 + # Optional. Default value is 80. The average + target CPU utilization for the component. + } + }, + "min_instance_count": 0 # + Optional. The minimum amount of instances for this + component. Must be less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". 
* GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. + }, + "digest": "str", # Optional. + The image digest. Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * + "$username:$access_token" for registries of type + ``GHCR``. + "registry_type": "str", # + Optional. 
* DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. + }, + "instance_count": 1, # Optional. + Default value is 1. The amount of instances that this + component should be scaled to. Default: 1. Must not be set if + autoscaling is used. + "instance_size_slug": {}, + "kind": "UNSPECIFIED", # Optional. + Default value is "UNSPECIFIED". * UNSPECIFIED: Default job + type, will auto-complete to POST_DEPLOY kind. * PRE_DEPLOY: + Indicates a job that runs before an app deployment. * + POST_DEPLOY: Indicates a job that runs after an app + deployment. * FAILED_DEPLOY: Indicates a job that runs after + a component fails to deploy. Known values are: "UNSPECIFIED", + "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY". + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. 
OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. + Must be unique across all components within the same app. + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str", # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + "termination": { + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which + causes immediate shutdown. (Default 120). + } + } + ], + "maintenance": { + "archive": bool, # Optional. Indicates + whether the app should be archived. Setting this to true implies + that enabled is set to true. + "enabled": bool, # Optional. Indicates + whether maintenance mode should be enabled for the app. + "offline_page_url": "str" # Optional. A + custom offline page to display when maintenance mode is enabled + or the app is archived. + }, + "region": "str", # Optional. The slug form of the + geographical origin of the app. Default: ``nearest available``. Known + values are: "atl", "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", + "sgp", and "syd". + "services": [ + { + "autoscaling": { + "max_instance_count": 0, # + Optional. The maximum amount of instances for this + component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 + # Optional. Default value is 80. The average + target CPU utilization for the component. + } + }, + "min_instance_count": 0 # + Optional. 
The minimum amount of instances for this + component. Must be less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to + the client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. + The set of allowed HTTP request headers. This + configures the ``Access-Control-Allow-Headers`` + header. + ], + "allow_methods": [ + "str" # Optional. + The set of allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": + "str", # Optional. Exact string match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "prefix": + "str", # Optional. Prefix-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "regex": + "str" # Optional. RE2 style regex-based match. + Only 1 of ``exact``"" , ``prefix``"" , or + ``regex`` must be set. For more information about + RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. + The set of HTTP response headers that browsers are + allowed to access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. + An optional duration specifying how long browsers can + cache the results of a preflight request. This configures + the ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # + Optional. 
The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. 
+ }, + "health_check": { + "failure_threshold": 0, # + Optional. The number of failed health checks before + considered unhealthy. + "http_path": "str", # + Optional. The route path used for the HTTP health check + ping. If not set, the HTTP health check will be disabled + and a TCP health check used instead. + "initial_delay_seconds": 0, + # Optional. The number of seconds to wait before + beginning health checks. + "period_seconds": 0, # + Optional. The number of seconds to wait between health + checks. + "port": 0, # Optional. The + port on which the health check will be performed. If not + set, the health check will be performed on the + component's http_port. + "success_threshold": 0, # + Optional. The number of successful health checks before + considered healthy. + "timeout_seconds": 0 # + Optional. The number of seconds after which the check + times out. + }, + "http_port": 0, # Optional. The + internal port on which this service's run command will + listen. Default: 8080 If there is not an environment variable + with the name ``PORT``"" , one will be automatically added + with its value set to the value of this field. + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. + }, + "digest": "str", # Optional. + The image digest. Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * + "$username:$access_token" for registries of type + ``GHCR``. + "registry_type": "str", # + Optional. 
* DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. + }, + "instance_count": 1, # Optional. + Default value is 1. The amount of instances that this + component should be scaled to. Default: 1. Must not be set if + autoscaling is used. + "instance_size_slug": {}, + "internal_ports": [ + 0 # Optional. The ports on + which this service will listen for internal traffic. + ], + "liveness_health_check": { + "failure_threshold": 0, # + Optional. The number of failed health checks before + considered unhealthy. + "http_path": "str", # + Optional. The route path used for the HTTP health check + ping. If not set, the HTTP health check will be disabled + and a TCP health check used instead. + "initial_delay_seconds": 0, + # Optional. The number of seconds to wait before + beginning health checks. + "period_seconds": 0, # + Optional. The number of seconds to wait between health + checks. + "port": 0, # Optional. The + port on which the health check will be performed. + "success_threshold": 0, # + Optional. The number of successful health checks before + considered healthy. + "timeout_seconds": 0 # + Optional. The number of seconds after which the check + times out. + }, + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. 
Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. + Must be unique across all components within the same app. + "protocol": "str", # Optional. The + protocol which the service uses to serve traffic on the + http_port. * ``HTTP``"" : The app is serving the HTTP + protocol. Default. * ``HTTP2``"" : The app is serving the + HTTP/2 protocol. Currently, this needs to be implemented in + the service by serving HTTP/2 cleartext (h2c). Known values + are: "HTTP" and "HTTP2". + "routes": [ + { + "path": "str", # + Optional. (Deprecated - Use Ingress Rules instead). + An HTTP path prefix. Paths must start with / and must + be unique across all components within an app. + "preserve_path_prefix": bool # Optional. An optional + flag to preserve the path that is forwarded to the + backend service. By default, the HTTP request path + will be trimmed from the left when forwarded to the + component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. + } + ], + "run_command": "str", # Optional. 
An + optional run command to override the component's default. + "source_dir": "str", # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + "termination": { + "drain_seconds": 0, # + Optional. The number of seconds to wait between selecting + a container instance for termination and issuing the TERM + signal. Selecting a container instance for termination + begins an asynchronous drain of new requests on upstream + load-balancers. (Default 15). + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which + causes immediate shutdown. (Default 120). + } + } + ], + "static_sites": [ + { + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. + "catchall_document": "str", # + Optional. The name of the document to use as the fallback for + any requests to documents that are not found when serving + this static site. Only 1 of ``catchall_document`` or + ``error_document`` can be set. + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to + the client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. + The set of allowed HTTP request headers. This + configures the ``Access-Control-Allow-Headers`` + header. + ], + "allow_methods": [ + "str" # Optional. + The set of allowed HTTP methods. 
This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": + "str", # Optional. Exact string match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "prefix": + "str", # Optional. Prefix-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "regex": + "str" # Optional. RE2 style regex-based match. + Only 1 of ``exact``"" , ``prefix``"" , or + ``regex`` must be set. For more information about + RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. + The set of HTTP response headers that browsers are + allowed to access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. + An optional duration specifying how long browsers can + cache the results of a preflight request. This configures + the ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. 
If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "error_document": "404.html", # + Optional. Default value is "404.html". The name of the error + document to use when serving this static site. Default: + 404.html. If no such file exists within the built assets, App + Platform will supply one. + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. + }, + "digest": "str", # Optional. + The image digest. Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * + "$username:$access_token" for registries of type + ``GHCR``. 
+ "registry_type": "str", # + Optional. * DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. + }, + "index_document": "index.html", # + Optional. Default value is "index.html". The name of the + index document to use when serving this static site. Default: + index.html. + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. 
+ Must be unique across all components within the same app. + "output_dir": "str", # Optional. An + optional path to where the built assets will be located, + relative to the build context. If not set, App Platform will + automatically scan for these directory names: ``_static``"" , + ``dist``"" , ``public``"" , ``build``. + "routes": [ + { + "path": "str", # + Optional. (Deprecated - Use Ingress Rules instead). + An HTTP path prefix. Paths must start with / and must + be unique across all components within an app. + "preserve_path_prefix": bool # Optional. An optional + flag to preserve the path that is forwarded to the + backend service. By default, the HTTP request path + will be trimmed from the left when forwarded to the + component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. + } + ], + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str" # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + } + ], + "vpc": { + "egress_ips": [ + { + "ip": "str" # Optional. The + egress ips associated with the VPC. + } + ], + "id": "str" # Optional. The ID of the VPC. + }, + "workers": [ + { + "autoscaling": { + "max_instance_count": 0, # + Optional. The maximum amount of instances for this + component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 + # Optional. Default value is 80. The average + target CPU utilization for the component. + } + }, + "min_instance_count": 0 # + Optional. The minimum amount of instances for this + component. Must be less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. 
Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. 
Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. + }, + "digest": "str", # Optional. + The image digest. Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * + "$username:$access_token" for registries of type + ``GHCR``. + "registry_type": "str", # + Optional. * DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. + }, + "instance_count": 1, # Optional. + Default value is 1. The amount of instances that this + component should be scaled to. Default: 1. Must not be set if + autoscaling is used. + "instance_size_slug": {}, + "liveness_health_check": { + "failure_threshold": 0, # + Optional. The number of failed health checks before + considered unhealthy. 
+ "http_path": "str", # + Optional. The route path used for the HTTP health check + ping. If not set, the HTTP health check will be disabled + and a TCP health check used instead. + "initial_delay_seconds": 0, + # Optional. The number of seconds to wait before + beginning health checks. + "period_seconds": 0, # + Optional. The number of seconds to wait between health + checks. + "port": 0, # Optional. The + port on which the health check will be performed. + "success_threshold": 0, # + Optional. The number of successful health checks before + considered healthy. + "timeout_seconds": 0 # + Optional. The number of seconds after which the check + times out. + }, + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. 
+ Must be unique across all components within the same app. + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str", # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + "termination": { + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which + causes immediate shutdown. (Default 120). + } + } + ] + }, + "static_sites": [ + { + "name": "str", # Optional. The name of this + static site. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this static + site. + } + ], + "tier_slug": "str", # Optional. The current pricing tier + slug of the deployment. + "updated_at": "2020-02-20 00:00:00", # Optional. When the + deployment was last updated. + "workers": [ + { + "name": "str", # Optional. The name of this + worker. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this worker. + } + ] + }, + "deployment_id": "str", # Optional. For deployment events, this is + the same as the deployment's ID. For autoscaling events, this is the + deployment that was autoscaled. + "id": "str", # Optional. The ID of the event (UUID). + "type": "str" # Optional. The type of event. Known values are: + "UNKNOWN", "DEPLOYMENT", and "AUTOSCALING". + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_apps_cancel_event_request( + app_id=app_id, + event_id=event_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") 
+ ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def get_event_logs( + self, + app_id: str, + event_id: str, + *, + follow: Optional[bool] = None, + type: str = "UNSPECIFIED", + pod_connection_timeout: Optional[str] = None, + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Retrieve Event Logs. + + Retrieve the logs of an autoscaling event for an app. + + :param app_id: The app ID. Required. + :type app_id: str + :param event_id: The event ID. Required. + :type event_id: str + :keyword follow: Whether the logs should follow live updates. Default value is None. + :paramtype follow: bool + :keyword type: The type of logs to retrieve + + + * BUILD: Build-time logs + * DEPLOY: Deploy-time logs + * RUN: Live run-time logs + * RUN_RESTARTED: Logs of crashed/restarted instances during runtime + * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are: + "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value + is "UNSPECIFIED". + :paramtype type: str + :keyword pod_connection_timeout: An optional time duration to wait if the underlying component + instance is not immediately available. Default: ``3m``. Default value is None. + :paramtype pod_connection_timeout: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "historic_urls": [ + "str" # Optional. A list of URLs to archived log files. 
+ ], + "live_url": "str" # Optional. A URL of the real-time live logs. This URL may + use either the ``https://`` or ``wss://`` protocols and will keep pushing live + logs as they become available. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_apps_get_event_logs_request( + app_id=app_id, + event_id=event_id, + follow=follow, + type=type, + pod_connection_timeout=pod_connection_timeout, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, 
error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + @distributed_trace_async async def list_instance_sizes(self, **kwargs: Any) -> JSON: # pylint: disable=line-too-long @@ -104673,71 +110007,1256 @@ async def list_options(self, **kwargs: Any) -> JSON: "version": "str" # Optional. The engine version. } ], - "mysql": [ + "mysql": [ + { + "end_of_availability": "str", # Optional. A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. If null, the version + does not have an end of availability timeline. + "end_of_life": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life + timeline. + "version": "str" # Optional. The engine version. 
+ } + ], + "opensearch": [ + { + "end_of_availability": "str", # Optional. A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. If null, the version + does not have an end of availability timeline. + "end_of_life": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life + timeline. + "version": "str" # Optional. The engine version. + } + ], + "pg": [ + { + "end_of_availability": "str", # Optional. A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. If null, the version + does not have an end of availability timeline. + "end_of_life": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life + timeline. + "version": "str" # Optional. The engine version. + } + ], + "redis": [ + { + "end_of_availability": "str", # Optional. A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. If null, the version + does not have an end of availability timeline. + "end_of_life": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life + timeline. + "version": "str" # Optional. The engine version. + } + ], + "valkey": [ + { + "end_of_availability": "str", # Optional. A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. If null, the version + does not have an end of availability timeline. + "end_of_life": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life + timeline. 
+ "version": "str" # Optional. The engine version. + } + ] + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_databases_list_options_request( + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + 
response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def list_clusters( + self, *, tag_name: Optional[str] = None, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """List All Database Clusters. + + To list all of the database clusters available on your account, send a GET request to + ``/v2/databases``. To limit the results to database clusters with a specific tag, include the + ``tag_name`` query parameter set to the name of the tag. For example, + ``/v2/databases?tag_name=$TAG_NAME``. + + The result will be a JSON object with a ``databases`` key. This will be set to an array of + database objects, each of which will contain the standard database attributes. + + The embedded ``connection`` and ``private_connection`` objects will contain the information + needed to access the database cluster. For multi-node clusters, the ``standby_connection`` and + ``standby_private_connection`` objects will contain the information needed to connect to the + cluster's standby node(s). 
+ + The embedded ``maintenance_window`` object will contain information about any scheduled + maintenance for the database cluster. + + :keyword tag_name: Limits the results to database clusters with a specific + tag.:code:`
<br>`:code:`<br>
`Requires ``tag:read`` scope. Default value is None. + :paramtype tag_name: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "databases": [ + { + "engine": "str", # A slug representing the database engine + used for the cluster. The possible values are: "pg" for PostgreSQL, + "mysql" for MySQL, "redis" for Caching, "mongodb" for MongoDB, "kafka" + for Kafka, "opensearch" for OpenSearch, and "valkey" for Valkey. + Required. Known values are: "pg", "mysql", "redis", "valkey", "mongodb", + "kafka", and "opensearch". + "name": "str", # A unique, human-readable name referring to + a database cluster. Required. + "num_nodes": 0, # The number of nodes in the database + cluster. Required. + "region": "str", # The slug identifier for the region where + the database cluster is located. Required. + "size": "str", # The slug identifier representing the size + of the nodes in the database cluster. Required. + "connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
<br>`:code:`<br>
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
<br>`:code:`<br>
`Requires + ``database:view_credentials`` scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the database cluster was created. + "db_names": [ + "str" # Optional. An array of strings containing the + names of databases created in the database cluster. + ], + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs + for the database cluster. Each CNAME must be a valid RFC 1123 + hostname (e.g., "db.example.com"). Maximum of 16 CNAMEs allowed, + each up to 253 characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to + identify and reference a database cluster. + "maintenance_window": { + "day": "str", # The day of the week on which to + apply maintenance updates. Required. + "hour": "str", # The hour in UTC at which + maintenance updates will be applied in 24 hour format. Required. + "description": [ + "str" # Optional. A list of strings, each + containing information about a pending maintenance update. + ], + "pending": bool # Optional. A boolean value + indicating whether any maintenance is scheduled to be performed in + the next window. + }, + "metrics_endpoints": [ + { + "host": "str", # Optional. A FQDN pointing + to the database cluster's node(s). + "port": 0 # Optional. The port on which a + service is listening. + } + ], + "private_connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "private_network_uuid": "str", # Optional. A string + specifying the UUID of the VPC to which the database cluster will be + assigned. If excluded, the cluster when creating a new database cluster, + it will be assigned to your account's default VPC for the region. + :code:`
`:code:`
`Requires ``vpc:read`` scope. + "project_id": "str", # Optional. The ID of the project that + the database cluster is assigned to. If excluded when creating a new + database cluster, it will be assigned to your default + project.:code:`
`:code:`
`Requires ``project:read`` scope. + "rules": [ + { + "type": "str", # The type of resource that + the firewall rule allows to access the database cluster. + Required. Known values are: "droplet", "k8s", "ip_addr", "tag", + and "app". + "value": "str", # The ID of the specific + resource, the name of a tag applied to a group of resources, or + the IP address that the firewall rule allows to access the + database cluster. Required. + "cluster_uuid": "str", # Optional. A unique + ID for the database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # + Optional. A time value given in ISO8601 combined date and time + format that represents when the firewall rule was created. + "description": "str", # Optional. A + human-readable description of the rule. + "uuid": "str" # Optional. A unique ID for + the firewall rule itself. + } + ], + "schema_registry_connection": { + "host": "str", # Optional. The FQDN pointing to the + schema registry connection uri. + "password": "str", # Optional. The randomly + generated password for the schema + registry.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the schema + registry is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + schema registry.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "semantic_version": "str", # Optional. A string representing + the semantic version of the database engine in use for the cluster. + "standby_connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "status": "str", # Optional. A string representing the + current status of the database cluster. Known values are: "creating", + "online", "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added + to the cluster, in MiB. If null, no additional storage is added to the + cluster, beyond what is provided as a base amount from the 'size' and any + previously added additional storage. + "tags": [ + "str" # Optional. An array of tags that have been + applied to the database cluster. :code:`
`:code:`
`Requires + ``tag:read`` scope. + ], + "ui_connection": { + "host": "str", # Optional. The FQDN pointing to the + opensearch cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + opensearch dashboard is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + opensearch dashboard.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "users": [ + { + "name": "str", # The name of a database + user. Required. + "access_cert": "str", # Optional. Access + certificate for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key + for TLS client authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string + specifying the authentication method to be used for + connections to the MySQL user account. The valid values are + ``mysql_native_password`` or ``caching_sha2_password``. If + excluded when creating a new user, the default for the + version of MySQL in use will be used. As of MySQL 8.0, the + default is ``caching_sha2_password``. Required. Known values + are: "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly + generated password for the database user.:code:`
`Requires + ``database:view_credentials`` scope. + "role": "str", # Optional. A string + representing the database user's role. The value will be either + "primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", + # Permission set applied to the ACL. 'consume' allows + for messages to be consumed from the topic. 'produce' + allows for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for + 'produceconsume' as well as any operations to + administer the topic (delete, update). Required. + Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A + regex for matching the topic(s) that this ACL should + apply to. Required. + "id": "str" # + Optional. An identifier for the ACL. Will be computed + after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A + list of databases to which the user should have + access. When the database is set to ``admin``"" , the + user will have access to all databases based on the + user's role i.e. a user with the role ``readOnly`` + assigned to the ``admin`` database will have read + access to all databases. + ], + "role": "str" # Optional. + The role to assign to the user with each role mapping to + a MongoDB built-in role. ``readOnly`` maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and + "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # + Optional. A regex for matching the indexes that this + ACL should apply to. + "permission": "str" + # Optional. Permission set applied to the ACL. 'read' + allows user to read from the index. 'write' allows + for user to write to the index. 'readwrite' allows + for both 'read' and 'write' permission. 
+ 'deny'(default) restricts user from performing any + operation over an index. 'admin' allows for + 'readwrite' as well as any operations to administer + the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # + Optional. For Postgres clusters, set to ``true`` for a user + with replication rights. This option is not currently + supported for other database engines. + } + } + ], + "version": "str", # Optional. A string representing the + version of the database engine in use for the cluster. + "version_end_of_availability": "str", # Optional. A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. If null, the version does + not have an end of availability timeline. + "version_end_of_life": "str" # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life timeline. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_databases_list_clusters_request( + tag_name=tag_name, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = 
self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + async def create_cluster( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a New Database Cluster. + + To create a database cluster, send a POST request to ``/v2/databases``. To see a list of + options for each engine, such as available regions, size slugs, and versions, send a GET + request to the ``/v2/databases/options`` endpoint. The available sizes for the + ``storage_size_mib`` field depends on the cluster's size. To see a list of available sizes, see + `Managed Database Pricing `_. + + The create response returns a JSON object with a key called ``database``. The value of this is + an object that contains the standard attributes associated with a database cluster. The initial + value of the database cluster's ``status`` attribute is ``creating``. When the cluster is ready + to receive traffic, this changes to ``online``. + + The embedded ``connection`` and ``private_connection`` objects contains the information needed + to access the database cluster. For multi-node clusters, the ``standby_connection`` and + ``standby_private_connection`` objects contain the information needed to connect to the + cluster's standby node(s). + + DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To + create a new database cluster based on a backup of an existing cluster, send a POST request to + ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must + include a key named ``backup_restore`` with the name of the original database cluster and the + timestamp of the backup to be restored. 
Creating a database from a backup is the same as + forking a database in the control panel. + Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. Backups are + also not supported for Caching or Valkey clusters. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "engine": "str", # A slug representing the database engine used for the + cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, "redis" + for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" for + OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", "mysql", + "redis", "valkey", "mongodb", "kafka", and "opensearch". + "name": "str", # A unique, human-readable name referring to a database + cluster. Required. + "num_nodes": 0, # The number of nodes in the database cluster. Required. + "region": "str", # The slug identifier for the region where the database + cluster is located. Required. + "size": "str", # The slug identifier representing the size of the nodes in + the database cluster. Required. + "autoscale": { + "storage": { + "enabled": bool, # Whether storage autoscaling is enabled + for the cluster. Required. + "increment_gib": 0, # Optional. The amount of additional + storage to add (in GiB) when autoscaling is triggered. + "threshold_percent": 0 # Optional. The storage usage + threshold percentage that triggers autoscaling. When storage usage + exceeds this percentage, additional storage will be added automatically. + } + }, + "backup_restore": { + "database_name": "str", # The name of an existing database cluster + from which the backup will be restored. 
Required. + "backup_created_at": "2020-02-20 00:00:00" # Optional. The timestamp + of an existing database cluster backup in ISO8601 combined date and time + format. The most recent backup will be used if excluded. + }, + "connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given in + ISO8601 combined date and time format that represents when the database cluster + was created. + "db_names": [ + "str" # Optional. An array of strings containing the names of + databases created in the database cluster. + ], + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the database + cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify and + reference a database cluster. + "maintenance_window": { + "day": "str", # The day of the week on which to apply maintenance + updates. Required. + "hour": "str", # The hour in UTC at which maintenance updates will + be applied in 24 hour format. Required. + "description": [ + "str" # Optional. A list of strings, each containing + information about a pending maintenance update. + ], + "pending": bool # Optional. A boolean value indicating whether any + maintenance is scheduled to be performed in the next window. + }, + "metrics_endpoints": [ + { + "host": "str", # Optional. A FQDN pointing to the database + cluster's node(s). + "port": 0 # Optional. The port on which a service is + listening. + } + ], + "private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the UUID of + the VPC to which the database cluster will be assigned. If excluded, the cluster + when creating a new database cluster, it will be assigned to your account's + default VPC for the region. :code:`
`:code:`
`Requires ``vpc:read`` scope. + "project_id": "str", # Optional. The ID of the project that the database + cluster is assigned to. If excluded when creating a new database cluster, it will + be assigned to your default project.:code:`
`:code:`
`Requires + ``project:update`` scope. + "rules": [ + { + "type": "str", # The type of resource that the firewall rule + allows to access the database cluster. Required. Known values are: + "droplet", "k8s", "ip_addr", "tag", and "app". + "value": "str", # The ID of the specific resource, the name + of a tag applied to a group of resources, or the IP address that the + firewall rule allows to access the database cluster. Required. + "cluster_uuid": "str", # Optional. A unique ID for the + database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the firewall rule was created. + "description": "str", # Optional. A human-readable + description of the rule. + "uuid": "str" # Optional. A unique ID for the firewall rule + itself. + } + ], + "schema_registry_connection": { + "host": "str", # Optional. The FQDN pointing to the schema registry + connection uri. + "password": "str", # Optional. The randomly generated password for + the schema registry.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the schema registry is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the schema + registry.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "semantic_version": "str", # Optional. A string representing the semantic + version of the database engine in use for the cluster. + "standby_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "status": "str", # Optional. A string representing the current status of the + database cluster. Known values are: "creating", "online", "resizing", + "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the cluster, + in MiB. If null, no additional storage is added to the cluster, beyond what is + provided as a base amount from the 'size' and any previously added additional + storage. + "tags": [ + "str" # Optional. An array of tags (as strings) to apply to the + database cluster. :code:`
`:code:`
`Requires ``tag:create`` scope. + ], + "ui_connection": { + "host": "str", # Optional. The FQDN pointing to the opensearch + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the opensearch dashboard is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the opensearch + dashboard.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "users": [ + { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS + client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL + 8.0, the default is ``caching_sha2_password``. Required. Known values + are: "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password + for the database user.:code:`
`Requires ``database:view_credentials`` + scope. + "role": "str", # Optional. A string representing the + database user's role. The value will be either "primary" or "normal". + Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission + set applied to the ACL. 'consume' allows for messages to be + consumed from the topic. 'produce' allows for messages to be + published to the topic. 'produceconsume' allows for both + 'consume' and 'produce' permission. 'admin' allows for + 'produceconsume' as well as any operations to administer the + topic (delete, update). Required. Known values are: "admin", + "consume", "produce", and "produceconsume". + "topic": "str", # A regex for + matching the topic(s) that this ACL should apply to. + Required. + "id": "str" # Optional. An + identifier for the ACL. Will be computed after the ACL is + created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of + databases to which the user should have access. When the + database is set to ``admin``"" , the user will have access to + all databases based on the user's role i.e. a user with the + role ``readOnly`` assigned to the ``admin`` database will + have read access to all databases. + ], + "role": "str" # Optional. The role to assign + to the user with each role mapping to a MongoDB built-in role. + ``readOnly`` maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex + for matching the indexes that this ACL should apply to. + "permission": "str" # Optional. + Permission set applied to the ACL. 'read' allows user to read + from the index. 'write' allows for user to write to the + index. 'readwrite' allows for both 'read' and 'write' + permission. 
'deny'(default) restricts user from performing + any operation over an index. 'admin' allows for 'readwrite' + as well as any operations to administer the index. Known + values are: "deny", "admin", "read", "readwrite", and + "write". + } + ], + "pg_allow_replication": bool # Optional. For + Postgres clusters, set to ``true`` for a user with replication + rights. This option is not currently supported for other database + engines. + } + } + ], + "version": "str", # Optional. A string representing the version of the + database engine in use for the cluster. + "version_end_of_availability": "str", # Optional. A timestamp referring to + the date when the particular version will no longer be available for creating new + clusters. If null, the version does not have an end of availability timeline. + "version_end_of_life": "str" # Optional. A timestamp referring to the date + when the particular version will no longer be supported. If null, the version + does not have an end of life timeline. + } + + # response body for status code(s): 201 + response == { + "database": { + "engine": "str", # A slug representing the database engine used for + the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, + "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" + for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", + "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch". + "name": "str", # A unique, human-readable name referring to a + database cluster. Required. + "num_nodes": 0, # The number of nodes in the database cluster. + Required. + "region": "str", # The slug identifier for the region where the + database cluster is located. Required. + "size": "str", # The slug identifier representing the size of the + nodes in the database cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. 
The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "db_names": [ + "str" # Optional. An array of strings containing the names + of databases created in the database cluster. + ], + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the + database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database cluster. + "maintenance_window": { + "day": "str", # The day of the week on which to apply + maintenance updates. Required. + "hour": "str", # The hour in UTC at which maintenance + updates will be applied in 24 hour format. Required. + "description": [ + "str" # Optional. A list of strings, each containing + information about a pending maintenance update. + ], + "pending": bool # Optional. A boolean value indicating + whether any maintenance is scheduled to be performed in the next window. + }, + "metrics_endpoints": [ { - "end_of_availability": "str", # Optional. A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version - does not have an end of availability timeline. - "end_of_life": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life - timeline. - "version": "str" # Optional. The engine version. + "host": "str", # Optional. A FQDN pointing to the + database cluster's node(s). + "port": 0 # Optional. The port on which a service is + listening. } ], - "opensearch": [ + "private_connection": { + "database": "str", # Optional. The name of the default + database. 
+ "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the database cluster will be assigned. If excluded, + the cluster when creating a new database cluster, it will be assigned to your + account's default VPC for the region. :code:`
`:code:`
`Requires + ``vpc:read`` scope. + "project_id": "str", # Optional. The ID of the project that the + database cluster is assigned to. If excluded when creating a new database + cluster, it will be assigned to your default + project.:code:`
`:code:`
`Requires ``project:read`` scope. + "rules": [ { - "end_of_availability": "str", # Optional. A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version - does not have an end of availability timeline. - "end_of_life": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life - timeline. - "version": "str" # Optional. The engine version. + "type": "str", # The type of resource that the + firewall rule allows to access the database cluster. Required. Known + values are: "droplet", "k8s", "ip_addr", "tag", and "app". + "value": "str", # The ID of the specific resource, + the name of a tag applied to a group of resources, or the IP address + that the firewall rule allows to access the database cluster. + Required. + "cluster_uuid": "str", # Optional. A unique ID for + the database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A + time value given in ISO8601 combined date and time format that + represents when the firewall rule was created. + "description": "str", # Optional. A human-readable + description of the rule. + "uuid": "str" # Optional. A unique ID for the + firewall rule itself. } ], - "pg": [ - { - "end_of_availability": "str", # Optional. A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version - does not have an end of availability timeline. - "end_of_life": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life - timeline. - "version": "str" # Optional. The engine version. - } + "schema_registry_connection": { + "host": "str", # Optional. The FQDN pointing to the schema + registry connection uri. + "password": "str", # Optional. 
The randomly generated + password for the schema registry.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the schema registry + is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience + and should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the schema + registry.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "semantic_version": "str", # Optional. A string representing the + semantic version of the database engine in use for the cluster. + "standby_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the + cluster, in MiB. If null, no additional storage is added to the cluster, + beyond what is provided as a base amount from the 'size' and any previously + added additional storage. + "tags": [ + "str" # Optional. An array of tags that have been applied to + the database cluster. :code:`
`:code:`
`Requires ``tag:read`` + scope. ], - "redis": [ + "ui_connection": { + "host": "str", # Optional. The FQDN pointing to the + opensearch cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the opensearch + dashboard is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience + and should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + opensearch dashboard.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "users": [ { - "end_of_availability": "str", # Optional. A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version - does not have an end of availability timeline. - "end_of_life": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life - timeline. - "version": "str" # Optional. The engine version. + "name": "str", # The name of a database user. + Required. + "access_cert": "str", # Optional. Access certificate + for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS + client authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying + the authentication method to be used for connections to the MySQL + user account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, + the default for the version of MySQL in use will be used. As of + MySQL 8.0, the default is ``caching_sha2_password``. Required. + Known values are: "mysql_native_password" and + "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated + password for the database user.:code:`
`Requires + ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the + database user's role. The value will be either "primary" or "normal". + Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # + Permission set applied to the ACL. 'consume' allows for + messages to be consumed from the topic. 'produce' allows + for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' + permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, + update). Required. Known values are: "admin", "consume", + "produce", and "produceconsume". + "topic": "str", # A regex + for matching the topic(s) that this ACL should apply to. + Required. + "id": "str" # Optional. An + identifier for the ACL. Will be computed after the ACL is + created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of + databases to which the user should have access. When the + database is set to ``admin``"" , the user will have + access to all databases based on the user's role i.e. a + user with the role ``readOnly`` assigned to the ``admin`` + database will have read access to all databases. + ], + "role": "str" # Optional. The role + to assign to the user with each role mapping to a MongoDB + built-in role. ``readOnly`` maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and + "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. + A regex for matching the indexes that this ACL should + apply to. + "permission": "str" # + Optional. Permission set applied to the ACL. 'read' + allows user to read from the index. 'write' allows for + user to write to the index. 'readwrite' allows for both + 'read' and 'write' permission. 
'deny'(default) restricts + user from performing any operation over an index. 'admin' + allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", + "read", "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For + Postgres clusters, set to ``true`` for a user with replication + rights. This option is not currently supported for other database + engines. + } } ], - "valkey": [ - { - "end_of_availability": "str", # Optional. A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version - does not have an end of availability timeline. - "end_of_life": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life - timeline. - "version": "str" # Optional. The engine version. - } - ] + "version": "str", # Optional. A string representing the version of + the database engine in use for the cluster. + "version_end_of_availability": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be available + for creating new clusters. If null, the version does not have an end of + availability timeline. + "version_end_of_life": "str" # Optional. A timestamp referring to + the date when the particular version will no longer be supported. If null, + the version does not have an end of life timeline. } } # response body for status code(s): 404 @@ -104752,109 +111271,44 @@ async def list_options(self, **kwargs: Any) -> JSON: tickets to help identify the issue. 
} """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_databases_list_options_request( - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - if _stream: - await response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", 
response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - @distributed_trace_async - async def list_clusters( - self, *, tag_name: Optional[str] = None, **kwargs: Any + @overload + async def create_cluster( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """List All Database Clusters. + """Create a New Database Cluster. - To list all of the database clusters available on your account, send a GET request to - ``/v2/databases``. To limit the results to database clusters with a specific tag, include the - ``tag_name`` query parameter set to the name of the tag. For example, - ``/v2/databases?tag_name=$TAG_NAME``. + To create a database cluster, send a POST request to ``/v2/databases``. To see a list of + options for each engine, such as available regions, size slugs, and versions, send a GET + request to the ``/v2/databases/options`` endpoint. The available sizes for the + ``storage_size_mib`` field depends on the cluster's size. To see a list of available sizes, see + `Managed Database Pricing `_. - The result will be a JSON object with a ``databases`` key. This will be set to an array of - database objects, each of which will contain the standard database attributes. + The create response returns a JSON object with a key called ``database``. The value of this is + an object that contains the standard attributes associated with a database cluster. The initial + value of the database cluster's ``status`` attribute is ``creating``. When the cluster is ready + to receive traffic, this changes to ``online``. - The embedded ``connection`` and ``private_connection`` objects will contain the information - needed to access the database cluster. 
For multi-node clusters, the ``standby_connection`` and - ``standby_private_connection`` objects will contain the information needed to connect to the + The embedded ``connection`` and ``private_connection`` objects contains the information needed + to access the database cluster. For multi-node clusters, the ``standby_connection`` and + ``standby_private_connection`` objects contain the information needed to connect to the cluster's standby node(s). - The embedded ``maintenance_window`` object will contain information about any scheduled - maintenance for the database cluster. + DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To + create a new database cluster based on a backup of an existing cluster, send a POST request to + ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must + include a key named ``backup_restore`` with the name of the original database cluster and the + timestamp of the backup to be restored. Creating a database from a backup is the same as + forking a database in the control panel. + Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. Backups are + also not supported for Caching or Valkey clusters. - :keyword tag_name: Limits the results to database clusters with a specific - tag.:code:`
`:code:`
`Requires ``tag:read`` scope. Default value is None. - :paramtype tag_name: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -104862,327 +111316,307 @@ async def list_clusters( Example: .. code-block:: python - # response body for status code(s): 200 + # response body for status code(s): 201 response == { - "databases": [ - { - "engine": "str", # A slug representing the database engine - used for the cluster. The possible values are: "pg" for PostgreSQL, - "mysql" for MySQL, "redis" for Caching, "mongodb" for MongoDB, "kafka" - for Kafka, "opensearch" for OpenSearch, and "valkey" for Valkey. - Required. Known values are: "pg", "mysql", "redis", "valkey", "mongodb", - "kafka", and "opensearch". - "name": "str", # A unique, human-readable name referring to - a database cluster. Required. - "num_nodes": 0, # The number of nodes in the database - cluster. Required. - "region": "str", # The slug identifier for the region where - the database cluster is located. Required. - "size": "str", # The slug identifier representing the size - of the nodes in the database cluster. Required. - "connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the database cluster was created. - "db_names": [ - "str" # Optional. An array of strings containing the - names of databases created in the database cluster. - ], - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs - for the database cluster. Each CNAME must be a valid RFC 1123 - hostname (e.g., "db.example.com"). Maximum of 16 CNAMEs allowed, - each up to 253 characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to - identify and reference a database cluster. - "maintenance_window": { - "day": "str", # The day of the week on which to - apply maintenance updates. Required. - "hour": "str", # The hour in UTC at which - maintenance updates will be applied in 24 hour format. Required. - "description": [ - "str" # Optional. A list of strings, each - containing information about a pending maintenance update. - ], - "pending": bool # Optional. A boolean value - indicating whether any maintenance is scheduled to be performed in - the next window. - }, - "metrics_endpoints": [ - { - "host": "str", # Optional. A FQDN pointing - to the database cluster's node(s). - "port": 0 # Optional. The port on which a - service is listening. - } - ], - "private_connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "private_network_uuid": "str", # Optional. A string - specifying the UUID of the VPC to which the database cluster will be - assigned. If excluded, the cluster when creating a new database cluster, - it will be assigned to your account's default VPC for the region. - :code:`
`:code:`
`Requires ``vpc:read`` scope. - "project_id": "str", # Optional. The ID of the project that - the database cluster is assigned to. If excluded when creating a new - database cluster, it will be assigned to your default - project.:code:`
`:code:`
`Requires ``project:read`` scope. - "rules": [ - { - "type": "str", # The type of resource that - the firewall rule allows to access the database cluster. - Required. Known values are: "droplet", "k8s", "ip_addr", "tag", - and "app". - "value": "str", # The ID of the specific - resource, the name of a tag applied to a group of resources, or - the IP address that the firewall rule allows to access the - database cluster. Required. - "cluster_uuid": "str", # Optional. A unique - ID for the database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # - Optional. A time value given in ISO8601 combined date and time - format that represents when the firewall rule was created. - "description": "str", # Optional. A - human-readable description of the rule. - "uuid": "str" # Optional. A unique ID for - the firewall rule itself. - } - ], - "schema_registry_connection": { - "host": "str", # Optional. The FQDN pointing to the - schema registry connection uri. - "password": "str", # Optional. The randomly - generated password for the schema - registry.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the schema - registry is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - schema registry.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "semantic_version": "str", # Optional. A string representing - the semantic version of the database engine in use for the cluster. - "standby_connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "status": "str", # Optional. A string representing the - current status of the database cluster. Known values are: "creating", - "online", "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added - to the cluster, in MiB. If null, no additional storage is added to the - cluster, beyond what is provided as a base amount from the 'size' and any - previously added additional storage. - "tags": [ - "str" # Optional. An array of tags that have been - applied to the database cluster. :code:`
`:code:`
`Requires - ``tag:read`` scope. + "database": { + "engine": "str", # A slug representing the database engine used for + the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, + "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" + for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", + "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch". + "name": "str", # A unique, human-readable name referring to a + database cluster. Required. + "num_nodes": 0, # The number of nodes in the database cluster. + Required. + "region": "str", # The slug identifier for the region where the + database cluster is located. Required. + "size": "str", # The slug identifier representing the size of the + nodes in the database cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "db_names": [ + "str" # Optional. An array of strings containing the names + of databases created in the database cluster. + ], + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the + database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database cluster. + "maintenance_window": { + "day": "str", # The day of the week on which to apply + maintenance updates. Required. + "hour": "str", # The hour in UTC at which maintenance + updates will be applied in 24 hour format. Required. + "description": [ + "str" # Optional. A list of strings, each containing + information about a pending maintenance update. ], - "ui_connection": { - "host": "str", # Optional. The FQDN pointing to the - opensearch cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - opensearch dashboard is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - opensearch dashboard.:code:`
`:code:`
`Requires + "pending": bool # Optional. A boolean value indicating + whether any maintenance is scheduled to be performed in the next window. + }, + "metrics_endpoints": [ + { + "host": "str", # Optional. A FQDN pointing to the + database cluster's node(s). + "port": 0 # Optional. The port on which a service is + listening. + } + ], + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the database cluster will be assigned. If excluded, + the cluster when creating a new database cluster, it will be assigned to your + account's default VPC for the region. :code:`
`:code:`
`Requires + ``vpc:read`` scope. + "project_id": "str", # Optional. The ID of the project that the + database cluster is assigned to. If excluded when creating a new database + cluster, it will be assigned to your default + project.:code:`
`:code:`
`Requires ``project:read`` scope. + "rules": [ + { + "type": "str", # The type of resource that the + firewall rule allows to access the database cluster. Required. Known + values are: "droplet", "k8s", "ip_addr", "tag", and "app". + "value": "str", # The ID of the specific resource, + the name of a tag applied to a group of resources, or the IP address + that the firewall rule allows to access the database cluster. + Required. + "cluster_uuid": "str", # Optional. A unique ID for + the database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A + time value given in ISO8601 combined date and time format that + represents when the firewall rule was created. + "description": "str", # Optional. A human-readable + description of the rule. + "uuid": "str" # Optional. A unique ID for the + firewall rule itself. + } + ], + "schema_registry_connection": { + "host": "str", # Optional. The FQDN pointing to the schema + registry connection uri. + "password": "str", # Optional. The randomly generated + password for the schema registry.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the schema registry + is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience + and should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the schema + registry.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "semantic_version": "str", # Optional. A string representing the + semantic version of the database engine in use for the cluster. + "standby_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the + cluster, in MiB. If null, no additional storage is added to the cluster, + beyond what is provided as a base amount from the 'size' and any previously + added additional storage. + "tags": [ + "str" # Optional. An array of tags that have been applied to + the database cluster. :code:`
`:code:`
`Requires ``tag:read`` + scope. + ], + "ui_connection": { + "host": "str", # Optional. The FQDN pointing to the + opensearch cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the opensearch + dashboard is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience + and should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + opensearch dashboard.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "users": [ + { + "name": "str", # The name of a database user. + Required. + "access_cert": "str", # Optional. Access certificate + for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS + client authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying + the authentication method to be used for connections to the MySQL + user account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, + the default for the version of MySQL in use will be used. As of + MySQL 8.0, the default is ``caching_sha2_password``. Required. + Known values are: "mysql_native_password" and + "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated + password for the database user.:code:`
`Requires ``database:view_credentials`` scope. - }, - "users": [ - { - "name": "str", # The name of a database - user. Required. - "access_cert": "str", # Optional. Access - certificate for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key - for TLS client authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string - specifying the authentication method to be used for - connections to the MySQL user account. The valid values are - ``mysql_native_password`` or ``caching_sha2_password``. If - excluded when creating a new user, the default for the - version of MySQL in use will be used. As of MySQL 8.0, the - default is ``caching_sha2_password``. Required. Known values - are: "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly - generated password for the database user.:code:`
`Requires - ``database:view_credentials`` scope. - "role": "str", # Optional. A string - representing the database user's role. The value will be either - "primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", - # Permission set applied to the ACL. 'consume' allows - for messages to be consumed from the topic. 'produce' - allows for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for - 'produceconsume' as well as any operations to - administer the topic (delete, update). Required. - Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A - regex for matching the topic(s) that this ACL should - apply to. Required. - "id": "str" # - Optional. An identifier for the ACL. Will be computed - after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A - list of databases to which the user should have - access. When the database is set to ``admin``"" , the - user will have access to all databases based on the - user's role i.e. a user with the role ``readOnly`` - assigned to the ``admin`` database will have read - access to all databases. - ], - "role": "str" # Optional. - The role to assign to the user with each role mapping to - a MongoDB built-in role. ``readOnly`` maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and - "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # - Optional. A regex for matching the indexes that this - ACL should apply to. - "permission": "str" - # Optional. Permission set applied to the ACL. 'read' - allows user to read from the index. 'write' allows - for user to write to the index. 'readwrite' allows - for both 'read' and 'write' permission. 
- 'deny'(default) restricts user from performing any - operation over an index. 'admin' allows for - 'readwrite' as well as any operations to administer - the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } + "role": "str", # Optional. A string representing the + database user's role. The value will be either "primary" or "normal". + Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # + Permission set applied to the ACL. 'consume' allows for + messages to be consumed from the topic. 'produce' allows + for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' + permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, + update). Required. Known values are: "admin", "consume", + "produce", and "produceconsume". + "topic": "str", # A regex + for matching the topic(s) that this ACL should apply to. + Required. + "id": "str" # Optional. An + identifier for the ACL. Will be computed after the ACL is + created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of + databases to which the user should have access. When the + database is set to ``admin``"" , the user will have + access to all databases based on the user's role i.e. a + user with the role ``readOnly`` assigned to the ``admin`` + database will have read access to all databases. ], - "pg_allow_replication": bool # - Optional. For Postgres clusters, set to ``true`` for a user - with replication rights. This option is not currently - supported for other database engines. - } + "role": "str" # Optional. The role + to assign to the user with each role mapping to a MongoDB + built-in role. ``readOnly`` maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and + "dbAdmin". 
+ }, + "opensearch_acl": [ + { + "index": "str", # Optional. + A regex for matching the indexes that this ACL should + apply to. + "permission": "str" # + Optional. Permission set applied to the ACL. 'read' + allows user to read from the index. 'write' allows for + user to write to the index. 'readwrite' allows for both + 'read' and 'write' permission. 'deny'(default) restricts + user from performing any operation over an index. 'admin' + allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", + "read", "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For + Postgres clusters, set to ``true`` for a user with replication + rights. This option is not currently supported for other database + engines. } - ], - "version": "str", # Optional. A string representing the - version of the database engine in use for the cluster. - "version_end_of_availability": "str", # Optional. A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version does - not have an end of availability timeline. - "version_end_of_life": "str" # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life timeline. - } - ] + } + ], + "version": "str", # Optional. A string representing the version of + the database engine in use for the cluster. + "version_end_of_availability": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be available + for creating new clusters. If null, the version does not have an end of + availability timeline. + "version_end_of_life": "str" # Optional. A timestamp referring to + the date when the particular version will no longer be supported. If null, + the version does not have an end of life timeline. 
+ } } # response body for status code(s): 404 response == { @@ -105196,88 +111630,9 @@ async def list_clusters( tickets to help identify the issue. } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_databases_list_clusters_request( - tag_name=tag_name, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - if _stream: - await response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - 
response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - @overload - async def create_cluster( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + @distributed_trace_async + async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # pylint: disable=line-too-long """Create a New Database Cluster. @@ -105306,11 +111661,8 @@ async def create_cluster( Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. Backups are also not supported for Caching or Valkey clusters. - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -105937,44 +112289,118 @@ async def create_cluster( tickets to help identify the issue. 
} """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @overload - async def create_cluster( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_create_cluster_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [201, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 201: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = 
self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Create a New Database Cluster. + """Retrieve an Existing Database Cluster. - To create a database cluster, send a POST request to ``/v2/databases``. To see a list of - options for each engine, such as available regions, size slugs, and versions, send a GET - request to the ``/v2/databases/options`` endpoint. The available sizes for the - ``storage_size_mib`` field depends on the cluster's size. To see a list of available sizes, see - `Managed Database Pricing `_. + To show information about an existing database cluster, send a GET request to + ``/v2/databases/$DATABASE_ID``. - The create response returns a JSON object with a key called ``database``. The value of this is - an object that contains the standard attributes associated with a database cluster. The initial - value of the database cluster's ``status`` attribute is ``creating``. When the cluster is ready - to receive traffic, this changes to ``online``. + The response will be a JSON object with a database key. This will be set to an object + containing the standard database cluster attributes. 
- The embedded ``connection`` and ``private_connection`` objects contains the information needed - to access the database cluster. For multi-node clusters, the ``standby_connection`` and + The embedded ``connection`` and ``private_connection`` objects will contain the information + needed to access the database cluster. For multi-node clusters, the ``standby_connection`` and ``standby_private_connection`` objects contain the information needed to connect to the cluster's standby node(s). - DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To - create a new database cluster based on a backup of an existing cluster, send a POST request to - ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must - include a key named ``backup_restore`` with the name of the original database cluster and the - timestamp of the backup to be restored. Creating a database from a backup is the same as - forking a database in the control panel. - Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. Backups are - also not supported for Caching or Valkey clusters. + The embedded maintenance_window object will contain information about any scheduled maintenance + for the database cluster. - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -105982,7 +112408,7 @@ async def create_cluster( Example: .. 
code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 200 response == { "database": { "engine": "str", # A slug representing the database engine used for @@ -106296,37 +112722,915 @@ async def create_cluster( tickets to help identify the issue. } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_databases_get_cluster_request( + database_cluster_uuid=database_cluster_uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None 
+ + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def destroy_cluster( + self, database_cluster_uuid: str, **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Destroy a Database Cluster. + + To destroy a specific database, send a DELETE request to ``/v2/databases/$DATABASE_ID``. + A status of 204 will be given. This indicates that the request was processed successfully, but + that no response body is needed. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_databases_destroy_cluster_request( + database_cluster_uuid=database_cluster_uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", 
response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Retrieve an Existing Database Cluster Configuration. + + Shows configuration parameters for an existing database cluster by sending a GET request to + ``/v2/databases/$DATABASE_ID/config``. + The response is a JSON object with a ``config`` key, which is set to an object + containing any database configuration parameters. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "config": {} + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_databases_get_config_request( + database_cluster_uuid=database_cluster_uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + 
response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + async def patch_config( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Update the Database Configuration for an Existing Database. + + To update the configuration for an existing database cluster, send a PATCH request to + ``/v2/databases/$DATABASE_ID/config``. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "config": {} + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @overload + async def patch_config( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Update the Database Configuration for an Existing Database. + + To update the configuration for an existing database cluster, send a PATCH request to + ``/v2/databases/$DATABASE_ID/config``. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def patch_config( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Update the Database Configuration for an Existing Database. + + To update the configuration for an existing database cluster, send a PATCH request to + ``/v2/databases/$DATABASE_ID/config``. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
+ :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "config": {} + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_patch_config_request( + database_cluster_uuid=database_cluster_uuid, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + 
_request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_ca(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Retrieve the Public Certificate. + + To retrieve the public certificate used to secure the connection to the database cluster send a + GET request to + ``/v2/databases/$DATABASE_ID/ca``. + + The response will be a JSON object with a ``ca`` key. 
This will be set to an object + containing the base64 encoding of the public key certificate. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "ca": { + "certificate": "str" # base64 encoding of the certificate used to + secure database connections. Required. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_databases_get_ca_request( + database_cluster_uuid=database_cluster_uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + 
response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def get_migration_status( + self, database_cluster_uuid: str, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Retrieve the Status of an Online Migration. + + To retrieve the status of the most recent online migration, send a GET request to + ``/v2/databases/$DATABASE_ID/online-migration``. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "created_at": "str", # Optional. The time the migration was initiated, in + ISO 8601 format. + "id": "str", # Optional. The ID of the most recent migration. + "status": "str" # Optional. The current status of the migration. Known + values are: "running", "syncing", "canceled", "error", and "done". + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_databases_get_migration_status_request( + database_cluster_uuid=database_cluster_uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + 
response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + async def update_online_migration( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Start an Online Migration. + + To start an online migration, send a PUT request to + ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a + connection with an existing cluster and replicates its contents to the target cluster. Online + migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters. + If the existing database is continuously being written to, the migration process will continue + for up to two weeks unless it is manually stopped. Online migration is only available for + `MySQL + `_\\ + , `PostgreSQL + `_\\ , `Caching + `_\\ , and `Valkey + `_ clusters. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "source": { + "dbname": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. 
The randomly generated password for + the default user. + "port": 0, # Optional. The port on which the database cluster is + listening. + "username": "str" # Optional. The default user for the database. + }, + "disable_ssl": bool, # Optional. Enables SSL encryption when connecting to + the source database. + "ignore_dbs": [ + "str" # Optional. List of databases that should be ignored during + migration. + ] + } + + # response body for status code(s): 200 + response == { + "created_at": "str", # Optional. The time the migration was initiated, in + ISO 8601 format. + "id": "str", # Optional. The ID of the most recent migration. + "status": "str" # Optional. The current status of the migration. Known + values are: "running", "syncing", "canceled", "error", and "done". + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ - @distributed_trace_async - async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: + @overload + async def update_online_migration( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Create a New Database Cluster. + """Start an Online Migration. - To create a database cluster, send a POST request to ``/v2/databases``. To see a list of - options for each engine, such as available regions, size slugs, and versions, send a GET - request to the ``/v2/databases/options`` endpoint. 
The available sizes for the - ``storage_size_mib`` field depends on the cluster's size. To see a list of available sizes, see - `Managed Database Pricing `_. + To start an online migration, send a PUT request to + ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a + connection with an existing cluster and replicates its contents to the target cluster. Online + migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters. + If the existing database is continuously being written to, the migration process will continue + for up to two weeks unless it is manually stopped. Online migration is only available for + `MySQL + `_\\ + , `PostgreSQL + `_\\ , `Caching + `_\\ , and `Valkey + `_ clusters. - The create response returns a JSON object with a key called ``database``. The value of this is - an object that contains the standard attributes associated with a database cluster. The initial - value of the database cluster's ``status`` attribute is ``creating``. When the cluster is ready - to receive traffic, this changes to ``online``. + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: - The embedded ``connection`` and ``private_connection`` objects contains the information needed - to access the database cluster. For multi-node clusters, the ``standby_connection`` and - ``standby_private_connection`` objects contain the information needed to connect to the - cluster's standby node(s). + Example: + .. code-block:: python - DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. 
To - create a new database cluster based on a backup of an existing cluster, send a POST request to - ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must - include a key named ``backup_restore`` with the name of the original database cluster and the - timestamp of the backup to be restored. Creating a database from a backup is the same as - forking a database in the control panel. - Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. Backups are - also not supported for Caching or Valkey clusters. + # response body for status code(s): 200 + response == { + "created_at": "str", # Optional. The time the migration was initiated, in + ISO 8601 format. + "id": "str", # Optional. The ID of the most recent migration. + "status": "str" # Optional. The current status of the migration. Known + values are: "running", "syncing", "canceled", "error", and "done". + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def update_online_migration( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Start an Online Migration. + + To start an online migration, send a PUT request to + ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a + connection with an existing cluster and replicates its contents to the target cluster. 
Online + migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters. + If the existing database is continuously being written to, the migration process will continue + for up to two weeks unless it is manually stopped. Online migration is only available for + `MySQL + `_\\ + , `PostgreSQL + `_\\ , `Caching + `_\\ , and `Valkey + `_ clusters. + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object @@ -106338,611 +113642,382 @@ async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> J # JSON input template you can fill out and use as your body input. body = { - "engine": "str", # A slug representing the database engine used for the - cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, "redis" - for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" for - OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", "mysql", - "redis", "valkey", "mongodb", "kafka", and "opensearch". - "name": "str", # A unique, human-readable name referring to a database - cluster. Required. - "num_nodes": 0, # The number of nodes in the database cluster. Required. - "region": "str", # The slug identifier for the region where the database - cluster is located. Required. - "size": "str", # The slug identifier representing the size of the nodes in - the database cluster. Required. - "autoscale": { - "storage": { - "enabled": bool, # Whether storage autoscaling is enabled - for the cluster. Required. - "increment_gib": 0, # Optional. The amount of additional - storage to add (in GiB) when autoscaling is triggered. - "threshold_percent": 0 # Optional. The storage usage - threshold percentage that triggers autoscaling. When storage usage - exceeds this percentage, additional storage will be added automatically. 
- } - }, - "backup_restore": { - "database_name": "str", # The name of an existing database cluster - from which the backup will be restored. Required. - "backup_created_at": "2020-02-20 00:00:00" # Optional. The timestamp - of an existing database cluster backup in ISO8601 combined date and time - format. The most recent backup will be used if excluded. - }, - "connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given in - ISO8601 combined date and time format that represents when the database cluster - was created. - "db_names": [ - "str" # Optional. An array of strings containing the names of - databases created in the database cluster. - ], - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the database - cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify and - reference a database cluster. - "maintenance_window": { - "day": "str", # The day of the week on which to apply maintenance - updates. Required. - "hour": "str", # The hour in UTC at which maintenance updates will - be applied in 24 hour format. Required. - "description": [ - "str" # Optional. A list of strings, each containing - information about a pending maintenance update. - ], - "pending": bool # Optional. A boolean value indicating whether any - maintenance is scheduled to be performed in the next window. - }, - "metrics_endpoints": [ - { - "host": "str", # Optional. A FQDN pointing to the database - cluster's node(s). - "port": 0 # Optional. The port on which a service is - listening. - } - ], - "private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_network_uuid": "str", # Optional. A string specifying the UUID of - the VPC to which the database cluster will be assigned. If excluded, the cluster - when creating a new database cluster, it will be assigned to your account's - default VPC for the region. :code:`
`:code:`
`Requires ``vpc:read`` scope. - "project_id": "str", # Optional. The ID of the project that the database - cluster is assigned to. If excluded when creating a new database cluster, it will - be assigned to your default project.:code:`
`:code:`
`Requires - ``project:update`` scope. - "rules": [ - { - "type": "str", # The type of resource that the firewall rule - allows to access the database cluster. Required. Known values are: - "droplet", "k8s", "ip_addr", "tag", and "app". - "value": "str", # The ID of the specific resource, the name - of a tag applied to a group of resources, or the IP address that the - firewall rule allows to access the database cluster. Required. - "cluster_uuid": "str", # Optional. A unique ID for the - database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the firewall rule was created. - "description": "str", # Optional. A human-readable - description of the rule. - "uuid": "str" # Optional. A unique ID for the firewall rule - itself. - } - ], - "schema_registry_connection": { - "host": "str", # Optional. The FQDN pointing to the schema registry - connection uri. - "password": "str", # Optional. The randomly generated password for - the schema registry.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the schema registry is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the schema - registry.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "semantic_version": "str", # Optional. A string representing the semantic - version of the database engine in use for the cluster. - "standby_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default database. + "source": { + "dbname": "str", # Optional. The name of the default database. "host": "str", # Optional. The FQDN pointing to the database cluster's current primary node. "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. + the default user. "port": 0, # Optional. The port on which the database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "status": "str", # Optional. A string representing the current status of the - database cluster. Known values are: "creating", "online", "resizing", - "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the cluster, - in MiB. If null, no additional storage is added to the cluster, beyond what is - provided as a base amount from the 'size' and any previously added additional - storage. - "tags": [ - "str" # Optional. An array of tags (as strings) to apply to the - database cluster. :code:`
`:code:`
`Requires ``tag:create`` scope. - ], - "ui_connection": { - "host": "str", # Optional. The FQDN pointing to the opensearch - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the opensearch dashboard is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the opensearch - dashboard.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. + "username": "str" # Optional. The default user for the database. }, - "users": [ - { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS - client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL - 8.0, the default is ``caching_sha2_password``. Required. Known values - are: "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password - for the database user.:code:`
`Requires ``database:view_credentials`` - scope. - "role": "str", # Optional. A string representing the - database user's role. The value will be either "primary" or "normal". - Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission - set applied to the ACL. 'consume' allows for messages to be - consumed from the topic. 'produce' allows for messages to be - published to the topic. 'produceconsume' allows for both - 'consume' and 'produce' permission. 'admin' allows for - 'produceconsume' as well as any operations to administer the - topic (delete, update). Required. Known values are: "admin", - "consume", "produce", and "produceconsume". - "topic": "str", # A regex for - matching the topic(s) that this ACL should apply to. - Required. - "id": "str" # Optional. An - identifier for the ACL. Will be computed after the ACL is - created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of - databases to which the user should have access. When the - database is set to ``admin``"" , the user will have access to - all databases based on the user's role i.e. a user with the - role ``readOnly`` assigned to the ``admin`` database will - have read access to all databases. - ], - "role": "str" # Optional. The role to assign - to the user with each role mapping to a MongoDB built-in role. - ``readOnly`` maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex - for matching the indexes that this ACL should apply to. - "permission": "str" # Optional. - Permission set applied to the ACL. 'read' allows user to read - from the index. 'write' allows for user to write to the - index. 'readwrite' allows for both 'read' and 'write' - permission. 
'deny'(default) restricts user from performing - any operation over an index. 'admin' allows for 'readwrite' - as well as any operations to administer the index. Known - values are: "deny", "admin", "read", "readwrite", and - "write". - } - ], - "pg_allow_replication": bool # Optional. For - Postgres clusters, set to ``true`` for a user with replication - rights. This option is not currently supported for other database - engines. - } - } - ], - "version": "str", # Optional. A string representing the version of the - database engine in use for the cluster. - "version_end_of_availability": "str", # Optional. A timestamp referring to - the date when the particular version will no longer be available for creating new - clusters. If null, the version does not have an end of availability timeline. - "version_end_of_life": "str" # Optional. A timestamp referring to the date - when the particular version will no longer be supported. If null, the version - does not have an end of life timeline. + "disable_ssl": bool, # Optional. Enables SSL encryption when connecting to + the source database. + "ignore_dbs": [ + "str" # Optional. List of databases that should be ignored during + migration. + ] } - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "database": { - "engine": "str", # A slug representing the database engine used for - the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" - for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", - "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch". - "name": "str", # A unique, human-readable name referring to a - database cluster. Required. - "num_nodes": 0, # The number of nodes in the database cluster. - Required. - "region": "str", # The slug identifier for the region where the - database cluster is located. Required. 
- "size": "str", # The slug identifier representing the size of the - nodes in the database cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "db_names": [ - "str" # Optional. An array of strings containing the names - of databases created in the database cluster. - ], - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the - database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database cluster. - "maintenance_window": { - "day": "str", # The day of the week on which to apply - maintenance updates. Required. - "hour": "str", # The hour in UTC at which maintenance - updates will be applied in 24 hour format. Required. - "description": [ - "str" # Optional. A list of strings, each containing - information about a pending maintenance update. - ], - "pending": bool # Optional. A boolean value indicating - whether any maintenance is scheduled to be performed in the next window. - }, - "metrics_endpoints": [ - { - "host": "str", # Optional. A FQDN pointing to the - database cluster's node(s). - "port": 0 # Optional. The port on which a service is - listening. - } - ], - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the database cluster will be assigned. If excluded, - the cluster when creating a new database cluster, it will be assigned to your - account's default VPC for the region. :code:`
`:code:`
`Requires - ``vpc:read`` scope. - "project_id": "str", # Optional. The ID of the project that the - database cluster is assigned to. If excluded when creating a new database - cluster, it will be assigned to your default - project.:code:`
`:code:`
`Requires ``project:read`` scope. - "rules": [ - { - "type": "str", # The type of resource that the - firewall rule allows to access the database cluster. Required. Known - values are: "droplet", "k8s", "ip_addr", "tag", and "app". - "value": "str", # The ID of the specific resource, - the name of a tag applied to a group of resources, or the IP address - that the firewall rule allows to access the database cluster. - Required. - "cluster_uuid": "str", # Optional. A unique ID for - the database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A - time value given in ISO8601 combined date and time format that - represents when the firewall rule was created. - "description": "str", # Optional. A human-readable - description of the rule. - "uuid": "str" # Optional. A unique ID for the - firewall rule itself. - } - ], - "schema_registry_connection": { - "host": "str", # Optional. The FQDN pointing to the schema - registry connection uri. - "password": "str", # Optional. The randomly generated - password for the schema registry.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the schema registry - is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience - and should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the schema - registry.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "semantic_version": "str", # Optional. A string representing the - semantic version of the database engine in use for the cluster. - "standby_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the - cluster, in MiB. If null, no additional storage is added to the cluster, - beyond what is provided as a base amount from the 'size' and any previously - added additional storage. - "tags": [ - "str" # Optional. An array of tags that have been applied to - the database cluster. :code:`
`:code:`
`Requires ``tag:read`` - scope. - ], - "ui_connection": { - "host": "str", # Optional. The FQDN pointing to the - opensearch cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the opensearch - dashboard is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience - and should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - opensearch dashboard.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "users": [ - { - "name": "str", # The name of a database user. - Required. - "access_cert": "str", # Optional. Access certificate - for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS - client authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying - the authentication method to be used for connections to the MySQL - user account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, - the default for the version of MySQL in use will be used. As of - MySQL 8.0, the default is ``caching_sha2_password``. Required. - Known values are: "mysql_native_password" and - "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated - password for the database user.:code:`
`Requires - ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the - database user's role. The value will be either "primary" or "normal". - Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # - Permission set applied to the ACL. 'consume' allows for - messages to be consumed from the topic. 'produce' allows - for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' - permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, - update). Required. Known values are: "admin", "consume", - "produce", and "produceconsume". - "topic": "str", # A regex - for matching the topic(s) that this ACL should apply to. - Required. - "id": "str" # Optional. An - identifier for the ACL. Will be computed after the ACL is - created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of - databases to which the user should have access. When the - database is set to ``admin``"" , the user will have - access to all databases based on the user's role i.e. a - user with the role ``readOnly`` assigned to the ``admin`` - database will have read access to all databases. - ], - "role": "str" # Optional. The role - to assign to the user with each role mapping to a MongoDB - built-in role. ``readOnly`` maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and - "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. - A regex for matching the indexes that this ACL should - apply to. - "permission": "str" # - Optional. Permission set applied to the ACL. 'read' - allows user to read from the index. 'write' allows for - user to write to the index. 'readwrite' allows for both - 'read' and 'write' permission. 
'deny'(default) restricts - user from performing any operation over an index. 'admin' - allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", - "read", "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For - Postgres clusters, set to ``true`` for a user with replication - rights. This option is not currently supported for other database - engines. - } - } - ], - "version": "str", # Optional. A string representing the version of - the database engine in use for the cluster. - "version_end_of_availability": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be available - for creating new clusters. If null, the version does not have an end of - availability timeline. - "version_end_of_life": "str" # Optional. A timestamp referring to - the date when the particular version will no longer be supported. If null, - the version does not have an end of life timeline. - } + "created_at": "str", # Optional. The time the migration was initiated, in + ISO 8601 format. + "id": "str", # Optional. The ID of the most recent migration. + "status": "str" # Optional. The current status of the migration. Known + values are: "running", "syncing", "canceled", "error", and "done". + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_update_online_migration_request( + database_cluster_uuid=database_cluster_uuid, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if 
response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def delete_online_migration( + self, database_cluster_uuid: str, migration_id: str, **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Stop an Online Migration. + + To stop an online migration, send a DELETE request to + ``/v2/databases/$DATABASE_ID/online-migration/$MIGRATION_ID``. + + A status of 204 will be given. This indicates that the request was processed successfully, but + that no response body is needed. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param migration_id: A unique identifier assigned to the online migration. Required. + :type migration_id: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_databases_delete_online_migration_request( + database_cluster_uuid=database_cluster_uuid, + migration_id=migration_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + 
) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_region( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Migrate a Database Cluster to a New Region. + + To migrate a database cluster to a new region, send a ``PUT`` request to + ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a + ``region`` attribute. + + A successful request will receive a 202 Accepted status code with no body in + response. Querying the database cluster will show that its ``status`` attribute + will now be set to ``migrating``. This will transition back to ``online`` when the + migration has completed. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "region": "str" # A slug identifier for the region to which the database + cluster will be migrated. Required. + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. 
For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def update_region( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Migrate a Database Cluster to a New Region. + + To migrate a database cluster to a new region, send a ``PUT`` request to + ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a + ``region`` attribute. + + A successful request will receive a 202 Accepted status code with no body in + response. Querying the database cluster will show that its ``status`` attribute + will now be set to ``migrating``. This will transition back to ``online`` when the + migration has completed. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. 
+ "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def update_region( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Migrate a Database Cluster to a New Region. + + To migrate a database cluster to a new region, send a ``PUT`` request to + ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a + ``region`` attribute. + + A successful request will receive a 202 Accepted status code with no body in + response. Querying the database cluster will show that its ``status`` attribute + will now be set to ``migrating``. This will transition back to ``online`` when the + migration has completed. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "region": "str" # A slug identifier for the region to which the database + cluster will be migrated. Required. 
} + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -106974,7 +114049,7 @@ async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> J content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -106984,7 +114059,8 @@ async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> J else: _json = body - _request = build_databases_create_cluster_request( + _request = build_databases_update_region_request( + database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, content=_content, @@ -107002,14 +114078,15 @@ async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> J response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [202, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 201: + if response.status_code == 202: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -107020,11 +114097,6 @@ async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> J "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -107042,340 +114114,150 @@ async def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: 
Any) -> J deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore - @distributed_trace_async - async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + @overload + async def update_cluster_size( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Retrieve an Existing Database Cluster. + """Resize a Database Cluster. - To show information about an existing database cluster, send a GET request to - ``/v2/databases/$DATABASE_ID``. + To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The + body of the request must specify both the size and num_nodes attributes. + A successful request will receive a 202 Accepted status code with no body in response. Querying + the database cluster will show that its status attribute will now be set to resizing. This will + transition back to online when the resize operation has completed. - The response will be a JSON object with a database key. This will be set to an object - containing the standard database cluster attributes. + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: - The embedded ``connection`` and ``private_connection`` objects will contain the information - needed to access the database cluster. 
For multi-node clusters, the ``standby_connection`` and - ``standby_private_connection`` objects contain the information needed to connect to the - cluster's standby node(s). + Example: + .. code-block:: python - The embedded maintenance_window object will contain information about any scheduled maintenance - for the database cluster. + # JSON input template you can fill out and use as your body input. + body = { + "num_nodes": 0, # The number of nodes in the database cluster. Valid values + are are 1-3. In addition to the primary node, up to two standby nodes may be + added for highly available configurations. Required. + "size": "str", # A slug identifier representing desired the size of the + nodes in the database cluster. Required. + "storage_size_mib": 0 # Optional. Additional storage added to the cluster, + in MiB. If null, no additional storage is added to the cluster, beyond what is + provided as a base amount from the 'size' and any previously added additional + storage. + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def update_cluster_size( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Resize a Database Cluster. + + To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. 
The + body of the request must specify both the size and num_nodes attributes. + A successful request will receive a 202 Accepted status code with no body in response. Querying + the database cluster will show that its status attribute will now be set to resizing. This will + transition back to online when the resize operation has completed. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :return: JSON object - :rtype: JSON + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 + # response body for status code(s): 404 response == { - "database": { - "engine": "str", # A slug representing the database engine used for - the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" - for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", - "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch". - "name": "str", # A unique, human-readable name referring to a - database cluster. Required. - "num_nodes": 0, # The number of nodes in the database cluster. - Required. - "region": "str", # The slug identifier for the region where the - database cluster is located. Required. - "size": "str", # The slug identifier representing the size of the - nodes in the database cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. 
The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "db_names": [ - "str" # Optional. An array of strings containing the names - of databases created in the database cluster. - ], - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the - database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database cluster. - "maintenance_window": { - "day": "str", # The day of the week on which to apply - maintenance updates. Required. - "hour": "str", # The hour in UTC at which maintenance - updates will be applied in 24 hour format. Required. - "description": [ - "str" # Optional. A list of strings, each containing - information about a pending maintenance update. - ], - "pending": bool # Optional. A boolean value indicating - whether any maintenance is scheduled to be performed in the next window. - }, - "metrics_endpoints": [ - { - "host": "str", # Optional. A FQDN pointing to the - database cluster's node(s). - "port": 0 # Optional. The port on which a service is - listening. - } - ], - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the database cluster will be assigned. If excluded, - the cluster when creating a new database cluster, it will be assigned to your - account's default VPC for the region. :code:`
`:code:`
`Requires - ``vpc:read`` scope. - "project_id": "str", # Optional. The ID of the project that the - database cluster is assigned to. If excluded when creating a new database - cluster, it will be assigned to your default - project.:code:`
`:code:`
`Requires ``project:read`` scope. - "rules": [ - { - "type": "str", # The type of resource that the - firewall rule allows to access the database cluster. Required. Known - values are: "droplet", "k8s", "ip_addr", "tag", and "app". - "value": "str", # The ID of the specific resource, - the name of a tag applied to a group of resources, or the IP address - that the firewall rule allows to access the database cluster. - Required. - "cluster_uuid": "str", # Optional. A unique ID for - the database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A - time value given in ISO8601 combined date and time format that - represents when the firewall rule was created. - "description": "str", # Optional. A human-readable - description of the rule. - "uuid": "str" # Optional. A unique ID for the - firewall rule itself. - } - ], - "schema_registry_connection": { - "host": "str", # Optional. The FQDN pointing to the schema - registry connection uri. - "password": "str", # Optional. The randomly generated - password for the schema registry.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the schema registry - is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience - and should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the schema - registry.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "semantic_version": "str", # Optional. A string representing the - semantic version of the database engine in use for the cluster. - "standby_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the - cluster, in MiB. If null, no additional storage is added to the cluster, - beyond what is provided as a base amount from the 'size' and any previously - added additional storage. - "tags": [ - "str" # Optional. An array of tags that have been applied to - the database cluster. :code:`
`:code:`
`Requires ``tag:read`` - scope. - ], - "ui_connection": { - "host": "str", # Optional. The FQDN pointing to the - opensearch cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the opensearch - dashboard is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience - and should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - opensearch dashboard.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "users": [ - { - "name": "str", # The name of a database user. - Required. - "access_cert": "str", # Optional. Access certificate - for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS - client authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying - the authentication method to be used for connections to the MySQL - user account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, - the default for the version of MySQL in use will be used. As of - MySQL 8.0, the default is ``caching_sha2_password``. Required. - Known values are: "mysql_native_password" and - "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated - password for the database user.:code:`
`Requires - ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the - database user's role. The value will be either "primary" or "normal". - Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # - Permission set applied to the ACL. 'consume' allows for - messages to be consumed from the topic. 'produce' allows - for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' - permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, - update). Required. Known values are: "admin", "consume", - "produce", and "produceconsume". - "topic": "str", # A regex - for matching the topic(s) that this ACL should apply to. - Required. - "id": "str" # Optional. An - identifier for the ACL. Will be computed after the ACL is - created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of - databases to which the user should have access. When the - database is set to ``admin``"" , the user will have - access to all databases based on the user's role i.e. a - user with the role ``readOnly`` assigned to the ``admin`` - database will have read access to all databases. - ], - "role": "str" # Optional. The role - to assign to the user with each role mapping to a MongoDB - built-in role. ``readOnly`` maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and - "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. - A regex for matching the indexes that this ACL should - apply to. - "permission": "str" # - Optional. Permission set applied to the ACL. 'read' - allows user to read from the index. 'write' allows for - user to write to the index. 'readwrite' allows for both - 'read' and 'write' permission. 
'deny'(default) restricts - user from performing any operation over an index. 'admin' - allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", - "read", "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For - Postgres clusters, set to ``true`` for a user with replication - rights. This option is not currently supported for other database - engines. - } - } - ], - "version": "str", # Optional. A string representing the version of - the database engine in use for the cluster. - "version_end_of_availability": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be available - for creating new clusters. If null, the version does not have an end of - availability timeline. - "version_end_of_life": "str" # Optional. A timestamp referring to - the date when the particular version will no longer be supported. If null, - the version does not have an end of life timeline. - } + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def update_cluster_size( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Resize a Database Cluster. + + To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The + body of the request must specify both the size and num_nodes attributes. 
+ A successful request will receive a 202 Accepted status code with no body in response. Querying + the database cluster will show that its status attribute will now be set to resizing. This will + transition back to online when the resize operation has completed. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "num_nodes": 0, # The number of nodes in the database cluster. Valid values + are are 1-3. In addition to the primary node, up to two standby nodes may be + added for highly available configurations. Required. + "size": "str", # A slug identifier representing desired the size of the + nodes in the database cluster. Required. + "storage_size_mib": 0 # Optional. Additional storage added to the cluster, + in MiB. If null, no additional storage is added to the cluster, beyond what is + provided as a base amount from the 'size' and any previously added additional + storage. 
} + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -107401,13 +114283,27 @@ async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_get_cluster_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_update_cluster_size_request( database_cluster_uuid=database_cluster_uuid, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -107422,14 +114318,15 @@ async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [202, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 202: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -107440,11 +114337,6 @@ async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if 
response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -107462,30 +114354,52 @@ async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @distributed_trace_async - async def destroy_cluster( + async def list_firewall_rules( self, database_cluster_uuid: str, **kwargs: Any - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Destroy a Database Cluster. + """List Firewall Rules (Trusted Sources) for a Database Cluster. - To destroy a specific database, send a DELETE request to ``/v2/databases/$DATABASE_ID``. - A status of 204 will be given. This indicates that the request was processed successfully, but - that no response body is needed. + To list all of a database cluster's firewall rules (known as "trusted sources" in the control + panel), send a GET request to ``/v2/databases/$DATABASE_ID/firewall``. + The result will be a JSON object with a ``rules`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 200 + response == { + "rules": [ + { + "type": "str", # The type of resource that the firewall rule + allows to access the database cluster. Required. Known values are: + "droplet", "k8s", "ip_addr", "tag", and "app". 
+ "value": "str", # The ID of the specific resource, the name + of a tag applied to a group of resources, or the IP address that the + firewall rule allows to access the database cluster. Required. + "cluster_uuid": "str", # Optional. A unique ID for the + database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the firewall rule was created. + "description": "str", # Optional. A human-readable + description of the rule. + "uuid": "str" # Optional. A unique ID for the firewall rule + itself. + } + ] + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -107514,9 +114428,9 @@ async def destroy_cluster( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_destroy_cluster_request( + _request = build_databases_list_firewall_rules_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -107532,15 +114446,14 @@ async def destroy_cluster( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -107551,6 +114464,11 @@ async def destroy_cluster( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + 
deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -107568,33 +114486,182 @@ async def destroy_cluster( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore - @distributed_trace_async - async def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + @overload + async def update_firewall_rules( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Retrieve an Existing Database Cluster Configuration. + """Update Firewall Rules (Trusted Sources) for a Database. - Shows configuration parameters for an existing database cluster by sending a GET request to - ``/v2/databases/$DATABASE_ID/config``. - The response is a JSON object with a ``config`` key, which is set to an object - containing any database configuration parameters. + To update a database cluster's firewall rules (known as "trusted sources" in the control + panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which + resources should be able to open connections to the database. You may limit connections to + specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or + Kubernetes node with that tag applied to it will have access. The firewall is limited to 100 + rules (or trusted sources). When possible, we recommend `placing your databases into a VPC + network `_ to limit access to them + instead of using a firewall. + A successful. :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
:type database_cluster_uuid: str - :return: JSON object - :rtype: JSON + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 + # JSON input template you can fill out and use as your body input. + body = { + "rules": [ + { + "type": "str", # The type of resource that the firewall rule + allows to access the database cluster. Required. Known values are: + "droplet", "k8s", "ip_addr", "tag", and "app". + "value": "str", # The ID of the specific resource, the name + of a tag applied to a group of resources, or the IP address that the + firewall rule allows to access the database cluster. Required. + "cluster_uuid": "str", # Optional. A unique ID for the + database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the firewall rule was created. + "description": "str", # Optional. A human-readable + description of the rule. + "uuid": "str" # Optional. A unique ID for the firewall rule + itself. + } + ] + } + + # response body for status code(s): 404 response == { - "config": {} + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @overload + async def update_firewall_rules( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Update Firewall Rules (Trusted Sources) for a Database. + + To update a database cluster's firewall rules (known as "trusted sources" in the control + panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which + resources should be able to open connections to the database. You may limit connections to + specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or + Kubernetes node with that tag applied to it will have access. The firewall is limited to 100 + rules (or trusted sources). When possible, we recommend `placing your databases into a VPC + network `_ to limit access to them + instead of using a firewall. + A successful. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def update_firewall_rules( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Update Firewall Rules (Trusted Sources) for a Database. + + To update a database cluster's firewall rules (known as "trusted sources" in the control + panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which + resources should be able to open connections to the database. You may limit connections to + specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or + Kubernetes node with that tag applied to it will have access. The firewall is limited to 100 + rules (or trusted sources). When possible, we recommend `placing your databases into a VPC + network `_ to limit access to them + instead of using a firewall. + A successful. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "rules": [ + { + "type": "str", # The type of resource that the firewall rule + allows to access the database cluster. Required. Known values are: + "droplet", "k8s", "ip_addr", "tag", and "app". + "value": "str", # The ID of the specific resource, the name + of a tag applied to a group of resources, or the IP address that the + firewall rule allows to access the database cluster. Required. + "cluster_uuid": "str", # Optional. 
A unique ID for the + database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the firewall rule was created. + "description": "str", # Optional. A human-readable + description of the rule. + "uuid": "str" # Optional. A unique ID for the firewall rule + itself. + } + ] } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -107620,13 +114687,27 @@ async def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_get_config_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_update_firewall_rules_request( database_cluster_uuid=database_cluster_uuid, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -107641,14 +114722,15 @@ async def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code 
== 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -107659,11 +114741,6 @@ async def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -107681,12 +114758,12 @@ async def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @overload - async def patch_config( + async def update_maintenance_window( self, database_cluster_uuid: str, body: JSON, @@ -107695,10 +114772,11 @@ async def patch_config( **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Update the Database Configuration for an Existing Database. + """Configure a Database Cluster's Maintenance Window. - To update the configuration for an existing database cluster, send a PATCH request to - ``/v2/databases/$DATABASE_ID/config``. + To configure the window when automatic maintenance should be performed for a database cluster, + send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -107716,7 +114794,16 @@ async def patch_config( # JSON input template you can fill out and use as your body input. body = { - "config": {} + "day": "str", # The day of the week on which to apply maintenance updates. 
+ Required. + "hour": "str", # The hour in UTC at which maintenance updates will be + applied in 24 hour format. Required. + "description": [ + "str" # Optional. A list of strings, each containing information + about a pending maintenance update. + ], + "pending": bool # Optional. A boolean value indicating whether any + maintenance is scheduled to be performed in the next window. } # response body for status code(s): 404 @@ -107733,7 +114820,7 @@ async def patch_config( """ @overload - async def patch_config( + async def update_maintenance_window( self, database_cluster_uuid: str, body: IO[bytes], @@ -107742,10 +114829,11 @@ async def patch_config( **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Update the Database Configuration for an Existing Database. + """Configure a Database Cluster's Maintenance Window. - To update the configuration for an existing database cluster, send a PATCH request to - ``/v2/databases/$DATABASE_ID/config``. + To configure the window when automatic maintenance should be performed for a database cluster, + send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -107758,36 +114846,171 @@ async def patch_config( :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: - Example: - .. code-block:: python + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def update_maintenance_window( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Configure a Database Cluster's Maintenance Window. + + To configure the window when automatic maintenance should be performed for a database cluster, + send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``. + A successful request will receive a 204 No Content status code with no body in response. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "day": "str", # The day of the week on which to apply maintenance updates. + Required. + "hour": "str", # The hour in UTC at which maintenance updates will be + applied in 24 hour format. Required. + "description": [ + "str" # Optional. A list of strings, each containing information + about a pending maintenance update. + ], + "pending": bool # Optional. A boolean value indicating whether any + maintenance is scheduled to be performed in the next window. + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. 
+ "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_update_maintenance_window_request( + database_cluster_uuid=database_cluster_uuid, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + 
response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ + return deserialized # type: ignore @distributed_trace_async - async def patch_config( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + async def install_update( + self, database_cluster_uuid: str, **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Update the Database Configuration for an Existing Database. + """Start Database Maintenance. - To update the configuration for an existing database cluster, send a PATCH request to - ``/v2/databases/$DATABASE_ID/config``. 
+ To start the installation of updates for a database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/install_update``. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -107795,11 +115018,6 @@ async def patch_config( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "config": {} - } - # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -107825,27 +115043,13 @@ async def patch_config( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body - - _request = build_databases_patch_config_request( + _request = build_databases_install_update_request( database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, headers=_headers, params=_params, ) @@ -107860,7 +115064,7 @@ async def patch_config( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, 
response=response, error_map=error_map) # type: ignore @@ -107868,7 +115072,7 @@ async def patch_config( deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -107901,16 +115105,16 @@ async def patch_config( return deserialized # type: ignore @distributed_trace_async - async def get_ca(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + async def list_backups(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Retrieve the Public Certificate. - - To retrieve the public certificate used to secure the connection to the database cluster send a - GET request to - ``/v2/databases/$DATABASE_ID/ca``. + """List Backups for a Database Cluster. - The response will be a JSON object with a ``ca`` key. This will be set to an object - containing the base64 encoding of the public key certificate. + To list all of the available backups of a PostgreSQL or MySQL database cluster, send a GET + request to ``/v2/databases/$DATABASE_ID/backups``. + **Note**\\ : Backups are not supported for Caching or Valkey clusters. + The result will be a JSON object with a ``backups key``. This will be set to an array of backup + objects, each of which will contain the size of the backup and the timestamp at which it was + created. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -107923,9 +115127,27 @@ async def get_ca(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "ca": { - "certificate": "str" # base64 encoding of the certificate used to - secure database connections. Required. + "backups": [ + { + "created_at": "2020-02-20 00:00:00", # A time value given in + ISO8601 combined date and time format at which the backup was created. + Required. 
+ "size_gigabytes": 0.0, # The size of the database backup in + GBs. Required. + "incremental": bool # Optional. Indicates if this backup is + a full or an incremental one (available only for MySQL). + } + ], + "backup_progress": "str", # Optional. If a backup is currently in progress, + this attribute shows the percentage of completion. If no backup is in progress, + this attribute will be hidden. + "scheduled_backup_time": { + "backup_hour": 0, # Optional. The hour of the day when the backup is + scheduled (in UTC). + "backup_interval_hours": 0, # Optional. The frequency, in hours, at + which backups are taken. + "backup_minute": 0 # Optional. The minute of the hour when the + backup is scheduled. } } # response body for status code(s): 404 @@ -107958,7 +115180,7 @@ async def get_ca(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_ca_request( + _request = build_databases_list_backups_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -108019,14 +115241,17 @@ async def get_ca(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_migration_status( - self, database_cluster_uuid: str, **kwargs: Any - ) -> JSON: + async def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Retrieve the Status of an Online Migration. + """List All Read-only Replicas. - To retrieve the status of the most recent online migration, send a GET request to - ``/v2/databases/$DATABASE_ID/online-migration``. + To list all of the read-only replicas associated with a database cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/replicas``. + + **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + + The result will be a JSON object with a ``replicas`` key. 
This will be set to an array of + database replica objects, each of which will contain the standard database replica attributes. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -108039,11 +115264,91 @@ async def get_migration_status( # response body for status code(s): 200 response == { - "created_at": "str", # Optional. The time the migration was initiated, in - ISO 8601 format. - "id": "str", # Optional. The ID of the most recent migration. - "status": "str" # Optional. The current status of the migration. Known - values are: "running", "syncing", "canceled", "error", and "done". + "replicas": [ + { + "name": "str", # The name to give the read-only replicating. + Required. + "connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the database cluster was created. + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs + for the database cluster. Each CNAME must be a valid RFC 1123 + hostname (e.g., "db.example.com"). Maximum of 16 CNAMEs allowed, + each up to 253 characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to + identify and reference a database replica. + "private_connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "private_network_uuid": "str", # Optional. A string + specifying the UUID of the VPC to which the read-only replica will be + assigned. If excluded, the replica will be assigned to your account's + default VPC for the region. :code:`
`:code:`
`Requires ``vpc:read`` + scope. + "region": "str", # Optional. A slug identifier for the + region where the read-only replica will be located. If excluded, the + replica will be placed in the same region as the cluster. + "size": "str", # Optional. A slug identifier representing + the size of the node for the read-only replica. The size of the replica + must be at least as large as the node size for the database cluster from + which it is replicating. + "status": "str", # Optional. A string representing the + current status of the database cluster. Known values are: "creating", + "online", "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added + to the cluster, in MiB. If null, no additional storage is added to the + cluster, beyond what is provided as a base amount from the 'size' and any + previously added additional storage. + "tags": [ + "str" # Optional. A flat array of tag names as + strings applied to the read-only + replica.:code:`
`:code:`
`Requires ``tag:read`` scope. + ] + } + ] } # response body for status code(s): 404 response == { @@ -108075,7 +115380,7 @@ async def get_migration_status( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_migration_status_request( + _request = build_databases_list_replicas_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -108136,33 +115441,31 @@ async def get_migration_status( return cast(JSON, deserialized) # type: ignore @overload - async def update_online_migration( + async def create_replica( self, database_cluster_uuid: str, - body: JSON, + body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Start an Online Migration. + """Create a Read-only Replica. - To start an online migration, send a PUT request to - ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a - connection with an existing cluster and replicates its contents to the target cluster. Online - migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters. - If the existing database is continuously being written to, the migration process will continue - for up to two weeks unless it is manually stopped. Online migration is only available for - `MySQL - `_\\ - , `PostgreSQL - `_\\ , `Caching - `_\\ , and `Valkey - `_ clusters. + To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request + to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of + the node to be used, and the region where it will be located. + + **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + + The response will be a JSON object with a key called ``replica``. The value of this will be an + object that contains the standard attributes associated with a database replica. 
The initial + value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is + ready to receive traffic, this will transition to ``active``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. + :param body: Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -108176,31 +115479,160 @@ async def update_online_migration( # JSON input template you can fill out and use as your body input. body = { - "source": { - "dbname": "str", # Optional. The name of the default database. + "name": "str", # The name to give the read-only replicating. Required. + "connection": { + "database": "str", # Optional. The name of the default database. "host": "str", # Optional. The FQDN pointing to the database cluster's current primary node. "password": "str", # Optional. The randomly generated password for - the default user. + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. "port": 0, # Optional. The port on which the database cluster is listening. - "username": "str" # Optional. The default user for the database. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. }, - "disable_ssl": bool, # Optional. Enables SSL encryption when connecting to - the source database. - "ignore_dbs": [ - "str" # Optional. List of databases that should be ignored during - migration. + "created_at": "2020-02-20 00:00:00", # Optional. A time value given in + ISO8601 combined date and time format that represents when the database cluster + was created. + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the database + cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify and + reference a database replica. + "private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the UUID of + the VPC to which the read-only replica will be assigned. If excluded, the replica + will be assigned to your account's default VPC for the region. + :code:`
`:code:`
`Requires ``vpc:read`` scope. + "region": "str", # Optional. A slug identifier for the region where the + read-only replica will be located. If excluded, the replica will be placed in the + same region as the cluster. + "size": "str", # Optional. A slug identifier representing the size of the + node for the read-only replica. The size of the replica must be at least as large + as the node size for the database cluster from which it is replicating. + "status": "str", # Optional. A string representing the current status of the + database cluster. Known values are: "creating", "online", "resizing", + "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the cluster, + in MiB. If null, no additional storage is added to the cluster, beyond what is + provided as a base amount from the 'size' and any previously added additional + storage. + "tags": [ + "str" # Optional. A flat array of tag names as strings to apply to + the read-only replica after it is created. Tag names can either be existing + or new tags. :code:`
`:code:`
`Requires ``tag:create`` scope. ] } - # response body for status code(s): 200 + # response body for status code(s): 201 response == { - "created_at": "str", # Optional. The time the migration was initiated, in - ISO 8601 format. - "id": "str", # Optional. The ID of the most recent migration. - "status": "str" # Optional. The current status of the migration. Known - values are: "running", "syncing", "canceled", "error", and "done". + "replica": { + "name": "str", # The name to give the read-only replicating. + Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the + database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database replica. + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the read-only replica will be assigned. If excluded, + the replica will be assigned to your account's default VPC for the region. + :code:`
`:code:`
`Requires ``vpc:read`` scope. + "region": "str", # Optional. A slug identifier for the region where + the read-only replica will be located. If excluded, the replica will be + placed in the same region as the cluster. + "size": "str", # Optional. A slug identifier representing the size + of the node for the read-only replica. The size of the replica must be at + least as large as the node size for the database cluster from which it is + replicating. + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the + cluster, in MiB. If null, no additional storage is added to the cluster, + beyond what is provided as a base amount from the 'size' and any previously + added additional storage. + "tags": [ + "str" # Optional. A flat array of tag names as strings + applied to the read-only replica.:code:`
`:code:`
`Requires + ``tag:read`` scope. + ] + } } # response body for status code(s): 404 response == { @@ -108216,33 +115648,31 @@ async def update_online_migration( """ @overload - async def update_online_migration( + async def create_replica( self, database_cluster_uuid: str, - body: IO[bytes], + body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Start an Online Migration. + """Create a Read-only Replica. - To start an online migration, send a PUT request to - ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a - connection with an existing cluster and replicates its contents to the target cluster. Online - migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters. - If the existing database is continuously being written to, the migration process will continue - for up to two weeks unless it is manually stopped. Online migration is only available for - `MySQL - `_\\ - , `PostgreSQL - `_\\ , `Caching - `_\\ , and `Valkey - `_ clusters. + To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request + to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of + the node to be used, and the region where it will be located. + + **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + + The response will be a JSON object with a key called ``replica``. The value of this will be an + object that contains the standard attributes associated with a database replica. The initial + value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is + ready to receive traffic, this will transition to ``active``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. + :param body: Default value is None. 
:type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". @@ -108254,13 +115684,86 @@ async def update_online_migration( Example: .. code-block:: python - # response body for status code(s): 200 + # response body for status code(s): 201 response == { - "created_at": "str", # Optional. The time the migration was initiated, in - ISO 8601 format. - "id": "str", # Optional. The ID of the most recent migration. - "status": "str" # Optional. The current status of the migration. Known - values are: "running", "syncing", "canceled", "error", and "done". + "replica": { + "name": "str", # The name to give the read-only replicating. + Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the + database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database replica. + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the read-only replica will be assigned. If excluded, + the replica will be assigned to your account's default VPC for the region. + :code:`
`:code:`
`Requires ``vpc:read`` scope. + "region": "str", # Optional. A slug identifier for the region where + the read-only replica will be located. If excluded, the replica will be + placed in the same region as the cluster. + "size": "str", # Optional. A slug identifier representing the size + of the node for the read-only replica. The size of the replica must be at + least as large as the node size for the database cluster from which it is + replicating. + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the + cluster, in MiB. If null, no additional storage is added to the cluster, + beyond what is provided as a base amount from the 'size' and any previously + added additional storage. + "tags": [ + "str" # Optional. A flat array of tag names as strings + applied to the read-only replica.:code:`
`:code:`
`Requires + ``tag:read`` scope. + ] + } } # response body for status code(s): 404 response == { @@ -108276,28 +115779,29 @@ async def update_online_migration( """ @distributed_trace_async - async def update_online_migration( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + async def create_replica( + self, + database_cluster_uuid: str, + body: Optional[Union[JSON, IO[bytes]]] = None, + **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Start an Online Migration. + """Create a Read-only Replica. - To start an online migration, send a PUT request to - ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a - connection with an existing cluster and replicates its contents to the target cluster. Online - migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters. - If the existing database is continuously being written to, the migration process will continue - for up to two weeks unless it is manually stopped. Online migration is only available for - `MySQL - `_\\ - , `PostgreSQL - `_\\ , `Caching - `_\\ , and `Valkey - `_ clusters. + To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request + to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of + the node to be used, and the region where it will be located. + + **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + + The response will be a JSON object with a key called ``replica``. The value of this will be an + object that contains the standard attributes associated with a database replica. The initial + value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is + ready to receive traffic, this will transition to ``active``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
:type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON @@ -108308,31 +115812,160 @@ async def update_online_migration( # JSON input template you can fill out and use as your body input. body = { - "source": { - "dbname": "str", # Optional. The name of the default database. + "name": "str", # The name to give the read-only replicating. Required. + "connection": { + "database": "str", # Optional. The name of the default database. "host": "str", # Optional. The FQDN pointing to the database cluster's current primary node. "password": "str", # Optional. The randomly generated password for - the default user. + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. "port": 0, # Optional. The port on which the database cluster is listening. - "username": "str" # Optional. The default user for the database. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. }, - "disable_ssl": bool, # Optional. Enables SSL encryption when connecting to - the source database. - "ignore_dbs": [ - "str" # Optional. List of databases that should be ignored during - migration. + "created_at": "2020-02-20 00:00:00", # Optional. A time value given in + ISO8601 combined date and time format that represents when the database cluster + was created. + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the database + cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify and + reference a database replica. + "private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the UUID of + the VPC to which the read-only replica will be assigned. If excluded, the replica + will be assigned to your account's default VPC for the region. + :code:`
`:code:`
`Requires ``vpc:read`` scope. + "region": "str", # Optional. A slug identifier for the region where the + read-only replica will be located. If excluded, the replica will be placed in the + same region as the cluster. + "size": "str", # Optional. A slug identifier representing the size of the + node for the read-only replica. The size of the replica must be at least as large + as the node size for the database cluster from which it is replicating. + "status": "str", # Optional. A string representing the current status of the + database cluster. Known values are: "creating", "online", "resizing", + "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the cluster, + in MiB. If null, no additional storage is added to the cluster, beyond what is + provided as a base amount from the 'size' and any previously added additional + storage. + "tags": [ + "str" # Optional. A flat array of tag names as strings to apply to + the read-only replica after it is created. Tag names can either be existing + or new tags. :code:`
`:code:`
`Requires ``tag:create`` scope. ] } - # response body for status code(s): 200 + # response body for status code(s): 201 response == { - "created_at": "str", # Optional. The time the migration was initiated, in - ISO 8601 format. - "id": "str", # Optional. The ID of the most recent migration. - "status": "str" # Optional. The current status of the migration. Known - values are: "running", "syncing", "canceled", "error", and "done". + "replica": { + "name": "str", # The name to give the read-only replicating. + Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the + database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database replica. + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the read-only replica will be assigned. If excluded, + the replica will be assigned to your account's default VPC for the region. + :code:`
`:code:`
`Requires ``vpc:read`` scope. + "region": "str", # Optional. A slug identifier for the region where + the read-only replica will be located. If excluded, the replica will be + placed in the same region as the cluster. + "size": "str", # Optional. A slug identifier representing the size + of the node for the read-only replica. The size of the replica must be at + least as large as the node size for the database cluster from which it is + replicating. + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the + cluster, in MiB. If null, no additional storage is added to the cluster, + beyond what is provided as a base amount from the 'size' and any previously + added additional storage. + "tags": [ + "str" # Optional. A flat array of tag names as strings + applied to the read-only replica.:code:`
`:code:`
`Requires + ``tag:read`` scope. + ] + } } # response body for status code(s): 404 response == { @@ -108373,9 +116006,12 @@ async def update_online_migration( if isinstance(body, (IOBase, bytes)): _content = body else: - _json = body + if body is not None: + _json = body + else: + _json = None - _request = build_databases_update_online_migration_request( + _request = build_databases_create_replica_request( database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, @@ -108394,14 +116030,14 @@ async def update_online_migration( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [201, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -108439,29 +116075,39 @@ async def update_online_migration( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def delete_online_migration( - self, database_cluster_uuid: str, migration_id: str, **kwargs: Any - ) -> Optional[JSON]: + async def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Stop an Online Migration. + """List all Events Logs. - To stop an online migration, send a DELETE request to - ``/v2/databases/$DATABASE_ID/online-migration/$MIGRATION_ID``. + To list all of the cluster events, send a GET request to + ``/v2/databases/$DATABASE_ID/events``. - A status of 204 will be given. This indicates that the request was processed successfully, but - that no response body is needed. + The result will be a JSON object with a ``events`` key. 
:param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param migration_id: A unique identifier assigned to the online migration. Required. - :type migration_id: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 200 + response == { + "events": [ + { + "cluster_name": "str", # Optional. The name of cluster. + "create_time": "str", # Optional. The time of the generation + of a event. + "event_type": "str", # Optional. Type of the event. Known + values are: "cluster_maintenance_perform", "cluster_master_promotion", + "cluster_create", "cluster_update", "cluster_delete", "cluster_poweron", + and "cluster_poweroff". + "id": "str" # Optional. ID of the particular event. + } + ] + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -108490,11 +116136,10 @@ async def delete_online_migration( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_delete_online_migration_request( + _request = build_databases_list_events_logs_request( database_cluster_uuid=database_cluster_uuid, - migration_id=migration_id, headers=_headers, params=_params, ) @@ -108509,15 +116154,14 @@ async def delete_online_migration( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if 
response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -108528,6 +116172,11 @@ async def delete_online_migration( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -108545,51 +116194,117 @@ async def delete_online_migration( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore - @overload - async def update_region( - self, - database_cluster_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> Optional[JSON]: + @distributed_trace_async + async def get_replica( + self, database_cluster_uuid: str, replica_name: str, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Migrate a Database Cluster to a New Region. + """Retrieve an Existing Read-only Replica. - To migrate a database cluster to a new region, send a ``PUT`` request to - ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a - ``region`` attribute. + To show information about an existing database replica, send a GET request to + ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME``. - A successful request will receive a 202 Accepted status code with no body in - response. Querying the database cluster will show that its ``status`` attribute - will now be set to ``migrating``. This will transition back to ``online`` when the - migration has completed. + **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. 
+ + The response will be a JSON object with a ``replica key``. This will be set to an object + containing the standard database replica attributes. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :param replica_name: The name of the database replica. Required. + :type replica_name: str + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "region": "str" # A slug identifier for the region to which the database - cluster will be migrated. Required. + # response body for status code(s): 200 + response == { + "replica": { + "name": "str", # The name to give the read-only replicating. + Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the + database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database replica. + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the read-only replica will be assigned. If excluded, + the replica will be assigned to your account's default VPC for the region. + :code:`
`:code:`
`Requires ``vpc:read`` scope. + "region": "str", # Optional. A slug identifier for the region where + the read-only replica will be located. If excluded, the replica will be + placed in the same region as the cluster. + "size": "str", # Optional. A slug identifier representing the size + of the node for the read-only replica. The size of the replica must be at + least as large as the node size for the database cluster from which it is + replicating. + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the + cluster, in MiB. If null, no additional storage is added to the cluster, + beyond what is provided as a base amount from the 'size' and any previously + added additional storage. + "tags": [ + "str" # Optional. A flat array of tag names as strings + applied to the read-only replica.:code:`
`:code:`
`Requires + ``tag:read`` scope. + ] + } } - # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -108602,75 +116317,104 @@ async def update_region( tickets to help identify the issue. } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @overload - async def update_region( - self, - database_cluster_uuid: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Migrate a Database Cluster to a New Region. + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} - To migrate a database cluster to a new region, send a ``PUT`` request to - ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a - ``region`` attribute. + cls: ClsType[JSON] = kwargs.pop("cls", None) - A successful request will receive a 202 Accepted status code with no body in - response. Querying the database cluster will show that its ``status`` attribute - will now be set to ``migrating``. This will transition back to ``online`` when the - migration has completed. + _request = build_databases_get_replica_request( + database_cluster_uuid=database_cluster_uuid, + replica_name=replica_name, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. 
- Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) - Example: - .. code-block:: python + response = pipeline_response.http_response - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def update_region( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + async def destroy_replica( + self, database_cluster_uuid: str, replica_name: str, **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Migrate a Database Cluster to a New Region. + """Destroy a Read-only Replica. - To migrate a database cluster to a new region, send a ``PUT`` request to - ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a - ``region`` attribute. 
+ To destroy a specific read-only replica, send a DELETE request to + ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME``. - A successful request will receive a 202 Accepted status code with no body in - response. Querying the database cluster will show that its ``status`` attribute - will now be set to ``migrating``. This will transition back to ``online`` when the - migration has completed. + **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + + A status of 204 will be given. This indicates that the request was processed successfully, but + that no response body is needed. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] + :param replica_name: The name of the database replica. Required. + :type replica_name: str :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -108678,12 +116422,6 @@ async def update_region( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "region": "str" # A slug identifier for the region to which the database - cluster will be migrated. Required. 
- } - # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -108709,27 +116447,14 @@ async def update_region( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body - - _request = build_databases_update_region_request( + _request = build_databases_destroy_replica_request( database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, + replica_name=replica_name, headers=_headers, params=_params, ) @@ -108744,7 +116469,7 @@ async def update_region( response = pipeline_response.http_response - if response.status_code not in [202, 404]: + if response.status_code not in [204, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -108752,7 +116477,7 @@ async def update_region( deserialized = None response_headers = {} - if response.status_code == 202: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -108784,126 +116509,25 @@ async def update_region( return deserialized # type: ignore - @overload - async def update_cluster_size( - self, - database_cluster_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Resize a Database Cluster. 
- - To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The - body of the request must specify both the size and num_nodes attributes. - A successful request will receive a 202 Accepted status code with no body in response. Querying - the database cluster will show that its status attribute will now be set to resizing. This will - transition back to online when the resize operation has completed. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "num_nodes": 0, # The number of nodes in the database cluster. Valid values - are are 1-3. In addition to the primary node, up to two standby nodes may be - added for highly available configurations. Required. - "size": "str", # A slug identifier representing desired the size of the - nodes in the database cluster. Required. - "storage_size_mib": 0 # Optional. Additional storage added to the cluster, - in MiB. If null, no additional storage is added to the cluster, beyond what is - provided as a base amount from the 'size' and any previously added additional - storage. - } - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. 
Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - async def update_cluster_size( - self, - database_cluster_uuid: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any + @distributed_trace_async + async def promote_replica( + self, database_cluster_uuid: str, replica_name: str, **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Resize a Database Cluster. - - To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The - body of the request must specify both the size and num_nodes attributes. - A successful request will receive a 202 Accepted status code with no body in response. Querying - the database cluster will show that its status attribute will now be set to resizing. This will - transition back to online when the resize operation has completed. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python + """Promote a Read-only Replica to become a Primary Cluster. - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. 
Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ + To promote a specific read-only replica, send a PUT request to + ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME/promote``. - @distributed_trace_async - async def update_cluster_size( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Resize a Database Cluster. + **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. - To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The - body of the request must specify both the size and num_nodes attributes. - A successful request will receive a 202 Accepted status code with no body in response. Querying - the database cluster will show that its status attribute will now be set to resizing. This will - transition back to online when the resize operation has completed. + A status of 204 will be given. This indicates that the request was processed successfully, but + that no response body is needed. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] + :param replica_name: The name of the database replica. Required. + :type replica_name: str :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -108911,19 +116535,6 @@ async def update_cluster_size( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "num_nodes": 0, # The number of nodes in the database cluster. Valid values - are are 1-3. In addition to the primary node, up to two standby nodes may be - added for highly available configurations. Required. 
- "size": "str", # A slug identifier representing desired the size of the - nodes in the database cluster. Required. - "storage_size_mib": 0 # Optional. Additional storage added to the cluster, - in MiB. If null, no additional storage is added to the cluster, beyond what is - provided as a base amount from the 'size' and any previously added additional - storage. - } - # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -108949,27 +116560,14 @@ async def update_cluster_size( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body - - _request = build_databases_update_cluster_size_request( + _request = build_databases_promote_replica_request( database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, + replica_name=replica_name, headers=_headers, params=_params, ) @@ -108984,7 +116582,7 @@ async def update_cluster_size( response = pipeline_response.http_response - if response.status_code not in [202, 404]: + if response.status_code not in [204, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -108992,7 +116590,7 @@ async def update_cluster_size( deserialized = None response_headers = {} - if response.status_code == 202: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", 
response.headers.get("ratelimit-limit") ) @@ -109025,15 +116623,23 @@ async def update_cluster_size( return deserialized # type: ignore @distributed_trace_async - async def list_firewall_rules( - self, database_cluster_uuid: str, **kwargs: Any - ) -> JSON: + async def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List Firewall Rules (Trusted Sources) for a Database Cluster. + """List all Database Users. - To list all of a database cluster's firewall rules (known as "trusted sources" in the control - panel), send a GET request to ``/v2/databases/$DATABASE_ID/firewall``. - The result will be a JSON object with a ``rules`` key. + To list all of the users for your database cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/users``. + + Note: User management is not supported for Caching or Valkey clusters. + + The result will be a JSON object with a ``users`` key. This will be set to an array + of database user objects, each of which will contain the standard database user attributes. + User passwords will not show without the ``database:view_credentials`` scope. + + For MySQL clusters, additional options will be contained in the mysql_settings object. + + For MongoDB clusters, additional information will be contained in the mongo_user_settings + object. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -109046,23 +116652,86 @@ async def list_firewall_rules( # response body for status code(s): 200 response == { - "rules": [ + "users": [ { - "type": "str", # The type of resource that the firewall rule - allows to access the database cluster. Required. Known values are: - "droplet", "k8s", "ip_addr", "tag", and "app". - "value": "str", # The ID of the specific resource, the name - of a tag applied to a group of resources, or the IP address that the - firewall rule allows to access the database cluster. Required. 
- "cluster_uuid": "str", # Optional. A unique ID for the - database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the firewall rule was created. - "description": "str", # Optional. A human-readable - description of the rule. - "uuid": "str" # Optional. A unique ID for the firewall rule - itself. + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS + client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL + 8.0, the default is ``caching_sha2_password``. Required. Known values + are: "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password + for the database user.:code:`
`Requires ``database:view_credentials`` + scope. + "role": "str", # Optional. A string representing the + database user's role. The value will be either "primary" or "normal". + Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission + set applied to the ACL. 'consume' allows for messages to be + consumed from the topic. 'produce' allows for messages to be + published to the topic. 'produceconsume' allows for both + 'consume' and 'produce' permission. 'admin' allows for + 'produceconsume' as well as any operations to administer the + topic (delete, update). Required. Known values are: "admin", + "consume", "produce", and "produceconsume". + "topic": "str", # A regex for + matching the topic(s) that this ACL should apply to. + Required. + "id": "str" # Optional. An + identifier for the ACL. Will be computed after the ACL is + created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of + databases to which the user should have access. When the + database is set to ``admin``"" , the user will have access to + all databases based on the user's role i.e. a user with the + role ``readOnly`` assigned to the ``admin`` database will + have read access to all databases. + ], + "role": "str" # Optional. The role to assign + to the user with each role mapping to a MongoDB built-in role. + ``readOnly`` maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex + for matching the indexes that this ACL should apply to. + "permission": "str" # Optional. + Permission set applied to the ACL. 'read' allows user to read + from the index. 'write' allows for user to write to the + index. 'readwrite' allows for both 'read' and 'write' + permission. 
'deny'(default) restricts user from performing + any operation over an index. 'admin' allows for 'readwrite' + as well as any operations to administer the index. Known + values are: "deny", "admin", "read", "readwrite", and + "write". + } + ], + "pg_allow_replication": bool # Optional. For + Postgres clusters, set to ``true`` for a user with replication + rights. This option is not currently supported for other database + engines. + } } ] } @@ -109096,7 +116765,7 @@ async def list_firewall_rules( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_firewall_rules_request( + _request = build_databases_list_users_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -109157,26 +116826,34 @@ async def list_firewall_rules( return cast(JSON, deserialized) # type: ignore @overload - async def update_firewall_rules( + async def add_user( self, database_cluster_uuid: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Update Firewall Rules (Trusted Sources) for a Database. + """Add a Database User. - To update a database cluster's firewall rules (known as "trusted sources" in the control - panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which - resources should be able to open connections to the database. You may limit connections to - specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or - Kubernetes node with that tag applied to it will have access. The firewall is limited to 100 - rules (or trusted sources). When possible, we recommend `placing your databases into a VPC - network `_ to limit access to them - instead of using a firewall. - A successful. + To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users`` + with the desired username. 
+ + Note: User management is not supported for Caching or Valkey clusters. + + When adding a user to a MySQL cluster, additional options can be configured in the + ``mysql_settings`` object. + + When adding a user to a Kafka cluster, additional options can be configured in + the ``settings`` object. + + When adding a user to a MongoDB cluster, additional options can be configured in + the ``settings.mongo_user_settings`` object. + + The response will be a JSON object with a key called ``user``. The value of this will be an + object that contains the standard attributes associated with a database user including + its randomly generated password. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -109185,8 +116862,8 @@ async def update_firewall_rules( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -109194,27 +116871,158 @@ async def update_firewall_rules( # JSON input template you can fill out and use as your body input. body = { - "rules": [ - { - "type": "str", # The type of resource that the firewall rule - allows to access the database cluster. Required. Known values are: - "droplet", "k8s", "ip_addr", "tag", and "app". - "value": "str", # The ID of the specific resource, the name - of a tag applied to a group of resources, or the IP address that the - firewall rule allows to access the database cluster. Required. - "cluster_uuid": "str", # Optional. A unique ID for the - database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the firewall rule was created. - "description": "str", # Optional. 
A human-readable - description of the rule. - "uuid": "str" # Optional. A unique ID for the firewall rule - itself. - } - ] + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client authentication. + (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the authentication method + to be used for connections to the MySQL user account. The valid values are + ``mysql_native_password`` or ``caching_sha2_password``. If excluded when + creating a new user, the default for the version of MySQL in use will be + used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required. + Known values are: "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "readonly": bool, # Optional. (To be deprecated: use + settings.mongo_user_settings.role instead for access controls to MongoDB + databases). For MongoDB clusters, set to ``true`` to create a read-only user. + This option is not currently supported for other database engines. + "role": "str", # Optional. A string representing the database user's role. + The value will be either "primary" or "normal". Known values are: "primary" and + "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set applied to the + ACL. 'consume' allows for messages to be consumed from the topic. + 'produce' allows for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' permission. + 'admin' allows for 'produceconsume' as well as any operations to + administer the topic (delete, update). Required. Known values are: + "admin", "consume", "produce", and "produceconsume". + "topic": "str", # A regex for matching the topic(s) + that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for the ACL. + Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to which the + user should have access. When the database is set to ``admin``"" , + the user will have access to all databases based on the user's role + i.e. a user with the role ``readOnly`` assigned to the ``admin`` + database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the user + with each role mapping to a MongoDB built-in role. ``readOnly`` maps to + a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for matching the + indexes that this ACL should apply to. 
+ "permission": "str" # Optional. Permission set + applied to the ACL. 'read' allows user to read from the index. + 'write' allows for user to write to the index. 'readwrite' allows for + both 'read' and 'write' permission. 'deny'(default) restricts user + from performing any operation over an index. 'admin' allows for + 'readwrite' as well as any operations to administer the index. Known + values are: "deny", "admin", "read", "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres clusters, set + to ``true`` for a user with replication rights. This option is not currently + supported for other database engines. + } } + # response body for status code(s): 201 + response == { + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. + } + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -109229,26 +117037,34 @@ async def update_firewall_rules( """ @overload - async def update_firewall_rules( + async def add_user( self, database_cluster_uuid: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Update Firewall Rules (Trusted Sources) for a Database. + """Add a Database User. - To update a database cluster's firewall rules (known as "trusted sources" in the control - panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which - resources should be able to open connections to the database. You may limit connections to - specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or - Kubernetes node with that tag applied to it will have access. The firewall is limited to 100 - rules (or trusted sources). When possible, we recommend `placing your databases into a VPC - network `_ to limit access to them - instead of using a firewall. - A successful. + To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users`` + with the desired username. + + Note: User management is not supported for Caching or Valkey clusters. + + When adding a user to a MySQL cluster, additional options can be configured in the + ``mysql_settings`` object. 
+ + When adding a user to a Kafka cluster, additional options can be configured in + the ``settings`` object. + + When adding a user to a MongoDB cluster, additional options can be configured in + the ``settings.mongo_user_settings`` object. + + The response will be a JSON object with a key called ``user``. The value of this will be an + object that contains the standard attributes associated with a database user including + its randomly generated password. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -109257,13 +117073,90 @@ async def update_firewall_rules( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 201 + response == { + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. + } + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -109278,28 +117171,36 @@ async def update_firewall_rules( """ @distributed_trace_async - async def update_firewall_rules( + async def add_user( self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Update Firewall Rules (Trusted Sources) for a Database. + """Add a Database User. - To update a database cluster's firewall rules (known as "trusted sources" in the control - panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which - resources should be able to open connections to the database. You may limit connections to - specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or - Kubernetes node with that tag applied to it will have access. The firewall is limited to 100 - rules (or trusted sources). When possible, we recommend `placing your databases into a VPC - network `_ to limit access to them - instead of using a firewall. - A successful. + To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users`` + with the desired username. + + Note: User management is not supported for Caching or Valkey clusters. + + When adding a user to a MySQL cluster, additional options can be configured in the + ``mysql_settings`` object. 
+ + When adding a user to a Kafka cluster, additional options can be configured in + the ``settings`` object. + + When adding a user to a MongoDB cluster, additional options can be configured in + the ``settings.mongo_user_settings`` object. + + The response will be a JSON object with a key called ``user``. The value of this will be an + object that contains the standard attributes associated with a database user including + its randomly generated password. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -109307,27 +117208,158 @@ async def update_firewall_rules( # JSON input template you can fill out and use as your body input. body = { - "rules": [ - { - "type": "str", # The type of resource that the firewall rule - allows to access the database cluster. Required. Known values are: - "droplet", "k8s", "ip_addr", "tag", and "app". - "value": "str", # The ID of the specific resource, the name - of a tag applied to a group of resources, or the IP address that the - firewall rule allows to access the database cluster. Required. - "cluster_uuid": "str", # Optional. A unique ID for the - database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the firewall rule was created. - "description": "str", # Optional. A human-readable - description of the rule. - "uuid": "str" # Optional. A unique ID for the firewall rule - itself. - } - ] + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. 
Access key for TLS client authentication. + (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the authentication method + to be used for connections to the MySQL user account. The valid values are + ``mysql_native_password`` or ``caching_sha2_password``. If excluded when + creating a new user, the default for the version of MySQL in use will be + used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required. + Known values are: "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "readonly": bool, # Optional. (To be deprecated: use + settings.mongo_user_settings.role instead for access controls to MongoDB + databases). For MongoDB clusters, set to ``true`` to create a read-only user. + This option is not currently supported for other database engines. + "role": "str", # Optional. A string representing the database user's role. + The value will be either "primary" or "normal". Known values are: "primary" and + "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set applied to the + ACL. 'consume' allows for messages to be consumed from the topic. + 'produce' allows for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' permission. + 'admin' allows for 'produceconsume' as well as any operations to + administer the topic (delete, update). Required. Known values are: + "admin", "consume", "produce", and "produceconsume". + "topic": "str", # A regex for matching the topic(s) + that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for the ACL. + Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to which the + user should have access. When the database is set to ``admin``"" , + the user will have access to all databases based on the user's role + i.e. a user with the role ``readOnly`` assigned to the ``admin`` + database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the user + with each role mapping to a MongoDB built-in role. ``readOnly`` maps to + a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for matching the + indexes that this ACL should apply to. 
+ "permission": "str" # Optional. Permission set + applied to the ACL. 'read' allows user to read from the index. + 'write' allows for user to write to the index. 'readwrite' allows for + both 'read' and 'write' permission. 'deny'(default) restricts user + from performing any operation over an index. 'admin' allows for + 'readwrite' as well as any operations to administer the index. Known + values are: "deny", "admin", "read", "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres clusters, set + to ``true`` for a user with replication rights. This option is not currently + supported for other database engines. + } } + # response body for status code(s): 201 + response == { + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. + } + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -109359,7 +117391,7 @@ async def update_firewall_rules( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -109369,7 +117401,7 @@ async def update_firewall_rules( else: _json = body - _request = build_databases_update_firewall_rules_request( + _request = build_databases_add_user_request( database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, @@ -109388,15 +117420,14 @@ async def update_firewall_rules( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [201, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -109407,6 +117438,11 @@ async def update_firewall_rules( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if 
response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -109424,146 +117460,122 @@ async def update_firewall_rules( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore - @overload - async def update_maintenance_window( - self, - database_cluster_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> Optional[JSON]: + @distributed_trace_async + async def get_user( + self, database_cluster_uuid: str, username: str, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Configure a Database Cluster's Maintenance Window. - - To configure the window when automatic maintenance should be performed for a database cluster, - send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``. - A successful request will receive a 204 No Content status code with no body in response. + """Retrieve an Existing Database User. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: + To show information about an existing database user, send a GET request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME``. - Example: - .. code-block:: python + Note: User management is not supported for Caching or Valkey clusters. - # JSON input template you can fill out and use as your body input. 
- body = { - "day": "str", # The day of the week on which to apply maintenance updates. - Required. - "hour": "str", # The hour in UTC at which maintenance updates will be - applied in 24 hour format. Required. - "description": [ - "str" # Optional. A list of strings, each containing information - about a pending maintenance update. - ], - "pending": bool # Optional. A boolean value indicating whether any - maintenance is scheduled to be performed in the next window. - } + The response will be a JSON object with a ``user`` key. This will be set to an object + containing the standard database user attributes. The user's password will not show + up unless the ``database:view_credentials`` scope is present. - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ + For MySQL clusters, additional options will be contained in the ``mysql_settings`` + object. - @overload - async def update_maintenance_window( - self, - database_cluster_uuid: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Configure a Database Cluster's Maintenance Window. + For Kafka clusters, additional options will be contained in the ``settings`` object. - To configure the window when automatic maintenance should be performed for a database cluster, - send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``. 
- A successful request will receive a 204 No Content status code with no body in response. + For MongoDB clusters, additional information will be contained in the mongo_user_settings + object. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :param username: The name of the database user. Required. + :type username: str + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 404 + # response body for status code(s): 200 response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace_async - async def update_maintenance_window( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Configure a Database Cluster's Maintenance Window. - - To configure the window when automatic maintenance should be performed for a database cluster, - send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``. - A successful request will receive a 204 No Content status code with no body in response. 
- - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "day": "str", # The day of the week on which to apply maintenance updates. - Required. - "hour": "str", # The hour in UTC at which maintenance updates will be - applied in 24 hour format. Required. - "description": [ - "str" # Optional. A list of strings, each containing information - about a pending maintenance update. - ], - "pending": bool # Optional. A boolean value indicating whether any - maintenance is scheduled to be performed in the next window. + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. + } + } } - # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -109589,27 +117601,14 @@ async def update_maintenance_window( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_update_maintenance_window_request( + _request = build_databases_get_user_request( database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, + username=username, headers=_headers, params=_params, ) @@ -109624,15 +117623,14 @@ async def update_maintenance_window( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if 
response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -109643,6 +117641,11 @@ async def update_maintenance_window( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -109660,23 +117663,29 @@ async def update_maintenance_window( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def install_update( - self, database_cluster_uuid: str, **kwargs: Any + async def delete_user( + self, database_cluster_uuid: str, username: str, **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Start Database Maintenance. + """Remove a Database User. - To start the installation of updates for a database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/install_update``. - A successful request will receive a 204 No Content status code with no body in response. + To remove a specific database user, send a DELETE request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME``. + + A status of 204 will be given. This indicates that the request was processed + successfully, but that no response body is needed. + + Note: User management is not supported for Caching or Valkey clusters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param username: The name of the database user. Required. 
+ :type username: str :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -109714,8 +117723,9 @@ async def install_update( cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_install_update_request( + _request = build_databases_delete_user_request( database_cluster_uuid=database_cluster_uuid, + username=username, headers=_headers, params=_params, ) @@ -109770,20 +117780,41 @@ async def install_update( return deserialized # type: ignore - @distributed_trace_async - async def list_backups(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + @overload + async def update_user( + self, + database_cluster_uuid: str, + username: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """List Backups for a Database Cluster. + """Update a Database User. - To list all of the available backups of a PostgreSQL or MySQL database cluster, send a GET - request to ``/v2/databases/$DATABASE_ID/backups``. - **Note**\\ : Backups are not supported for Caching or Valkey clusters. - The result will be a JSON object with a ``backups key``. This will be set to an array of backup - objects, each of which will contain the size of the backup and the timestamp at which it was - created. + To update an existing database user, send a PUT request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME`` + with the desired settings. + + **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change + the name of a user, + you must recreate a new user. + + The response will be a JSON object with a key called ``user``. The value of this will be an + object that contains the name of the update database user, along with the ``settings`` object + that + has been updated. :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
:type database_cluster_uuid: str + :param username: The name of the database user. Required. + :type username: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -109791,29 +117822,136 @@ async def list_backups(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: Example: .. code-block:: python - # response body for status code(s): 200 + # JSON input template you can fill out and use as your body input. + body = { + "settings": { + "acl": [ + { + "permission": "str", # Permission set applied to the + ACL. 'consume' allows for messages to be consumed from the topic. + 'produce' allows for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' permission. + 'admin' allows for 'produceconsume' as well as any operations to + administer the topic (delete, update). Required. Known values are: + "admin", "consume", "produce", and "produceconsume". + "topic": "str", # A regex for matching the topic(s) + that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for the ACL. + Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to which the + user should have access. When the database is set to ``admin``"" , + the user will have access to all databases based on the user's role + i.e. a user with the role ``readOnly`` assigned to the ``admin`` + database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the user + with each role mapping to a MongoDB built-in role. ``readOnly`` maps to + a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. 
Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for matching the + indexes that this ACL should apply to. + "permission": "str" # Optional. Permission set + applied to the ACL. 'read' allows user to read from the index. + 'write' allows for user to write to the index. 'readwrite' allows for + both 'read' and 'write' permission. 'deny'(default) restricts user + from performing any operation over an index. 'admin' allows for + 'readwrite' as well as any operations to administer the index. Known + values are: "deny", "admin", "read", "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres clusters, set + to ``true`` for a user with replication rights. This option is not currently + supported for other database engines. + } + } + + # response body for status code(s): 201 response == { - "backups": [ - { - "created_at": "2020-02-20 00:00:00", # A time value given in - ISO8601 combined date and time format at which the backup was created. - Required. - "size_gigabytes": 0.0, # The size of the database backup in - GBs. Required. - "incremental": bool # Optional. Indicates if this backup is - a full or an incremental one (available only for MySQL). + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. 
Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. } - ], - "backup_progress": "str", # Optional. If a backup is currently in progress, - this attribute shows the percentage of completion. If no backup is in progress, - this attribute will be hidden. - "scheduled_backup_time": { - "backup_hour": 0, # Optional. The hour of the day when the backup is - scheduled (in UTC). - "backup_interval_hours": 0, # Optional. The frequency, in hours, at - which backups are taken. - "backup_minute": 0 # Optional. The minute of the hour when the - backup is scheduled. } } # response body for status code(s): 404 @@ -109828,99 +117966,169 @@ async def list_backups(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: tickets to help identify the issue. 
} """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_databases_list_backups_request( - database_cluster_uuid=database_cluster_uuid, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - if response.status_code not in [200, 404]: - if _stream: - await response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) + @overload + async def update_user( + self, + database_cluster_uuid: str, + username: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update a Database User. 
- response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + To update an existing database user, send a PUT request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME`` + with the desired settings. - if response.content: - deserialized = response.json() - else: - deserialized = None + **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change + the name of a user, + you must recreate a new user. - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + The response will be a JSON object with a key called ``user``. The value of this will be an + object that contains the name of the update database user, along with the ``settings`` object + that + has been updated. - if response.content: - deserialized = response.json() - else: - deserialized = None + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param username: The name of the database user. Required. + :type username: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + Example: + .. code-block:: python - return cast(JSON, deserialized) # type: ignore + # response body for status code(s): 201 + response == { + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. + } + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ @distributed_trace_async - async def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + async def update_user( + self, + database_cluster_uuid: str, + username: str, + body: Union[JSON, IO[bytes]], + **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """List All Read-only Replicas. + """Update a Database User. - To list all of the read-only replicas associated with a database cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/replicas``. + To update an existing database user, send a PUT request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME`` + with the desired settings. - **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change + the name of a user, + you must recreate a new user. - The result will be a JSON object with a ``replicas`` key. 
This will be set to an array of - database replica objects, each of which will contain the standard database replica attributes. + The response will be a JSON object with a key called ``user``. The value of this will be an + object that contains the name of the update database user, along with the ``settings`` object + that + has been updated. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param username: The name of the database user. Required. + :type username: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -109928,93 +118136,137 @@ async def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON Example: .. code-block:: python - # response body for status code(s): 200 + # JSON input template you can fill out and use as your body input. + body = { + "settings": { + "acl": [ + { + "permission": "str", # Permission set applied to the + ACL. 'consume' allows for messages to be consumed from the topic. + 'produce' allows for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' permission. + 'admin' allows for 'produceconsume' as well as any operations to + administer the topic (delete, update). Required. Known values are: + "admin", "consume", "produce", and "produceconsume". + "topic": "str", # A regex for matching the topic(s) + that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for the ACL. + Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to which the + user should have access. When the database is set to ``admin``"" , + the user will have access to all databases based on the user's role + i.e. 
a user with the role ``readOnly`` assigned to the ``admin`` + database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the user + with each role mapping to a MongoDB built-in role. ``readOnly`` maps to + a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for matching the + indexes that this ACL should apply to. + "permission": "str" # Optional. Permission set + applied to the ACL. 'read' allows user to read from the index. + 'write' allows for user to write to the index. 'readwrite' allows for + both 'read' and 'write' permission. 'deny'(default) restricts user + from performing any operation over an index. 'admin' allows for + 'readwrite' as well as any operations to administer the index. Known + values are: "deny", "admin", "read", "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres clusters, set + to ``true`` for a user with replication rights. This option is not currently + supported for other database engines. + } + } + + # response body for status code(s): 201 response == { - "replicas": [ - { - "name": "str", # The name to give the read-only replicating. - Required. - "connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the database cluster was created. - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs - for the database cluster. Each CNAME must be a valid RFC 1123 - hostname (e.g., "db.example.com"). Maximum of 16 CNAMEs allowed, - each up to 253 characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to - identify and reference a database replica. - "private_connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". }, - "private_network_uuid": "str", # Optional. A string - specifying the UUID of the VPC to which the read-only replica will be - assigned. If excluded, the replica will be assigned to your account's - default VPC for the region. :code:`
`:code:`
`Requires ``vpc:read`` - scope. - "region": "str", # Optional. A slug identifier for the - region where the read-only replica will be located. If excluded, the - replica will be placed in the same region as the cluster. - "size": "str", # Optional. A slug identifier representing - the size of the node for the read-only replica. The size of the replica - must be at least as large as the node size for the database cluster from - which it is replicating. - "status": "str", # Optional. A string representing the - current status of the database cluster. Known values are: "creating", - "online", "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added - to the cluster, in MiB. If null, no additional storage is added to the - cluster, beyond what is provided as a base amount from the 'size' and any - previously added additional storage. - "tags": [ - "str" # Optional. A flat array of tag names as - strings applied to the read-only - replica.:code:`
`:code:`
`Requires ``tag:read`` scope. - ] + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. } - ] + } } # response body for status code(s): 404 response == { @@ -110041,13 +118293,28 @@ async def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_replicas_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_update_user_request( database_cluster_uuid=database_cluster_uuid, + username=username, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -110062,14 +118329,14 @@ async def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code 
not in [201, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -110107,31 +118374,33 @@ async def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON return cast(JSON, deserialized) # type: ignore @overload - async def create_replica( + async def reset_auth( self, database_cluster_uuid: str, - body: Optional[JSON] = None, + username: str, + body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create a Read-only Replica. + """Reset a Database User's Password or Authentication Method. - To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request - to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of - the node to be used, and the region where it will be located. + To reset the password for a database user, send a POST request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``. - **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + For ``mysql`` databases, the authentication method can be specifying by + including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin`` + value specified. - The response will be a JSON object with a key called ``replica``. The value of this will be an - object that contains the standard attributes associated with a database replica. The initial - value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is - ready to receive traffic, this will transition to ``active``. 
+ The response will be a JSON object with a ``user`` key. This will be set to an + object containing the standard database user attributes. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Default value is None. + :param username: The name of the database user. Required. + :type username: str + :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -110145,159 +118414,91 @@ async def create_replica( # JSON input template you can fill out and use as your body input. body = { - "name": "str", # The name to give the read-only replicating. Required. - "connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given in - ISO8601 combined date and time format that represents when the database cluster - was created. - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the database - cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify and - reference a database replica. - "private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_network_uuid": "str", # Optional. A string specifying the UUID of - the VPC to which the read-only replica will be assigned. If excluded, the replica - will be assigned to your account's default VPC for the region. - :code:`
`:code:`
`Requires ``vpc:read`` scope. - "region": "str", # Optional. A slug identifier for the region where the - read-only replica will be located. If excluded, the replica will be placed in the - same region as the cluster. - "size": "str", # Optional. A slug identifier representing the size of the - node for the read-only replica. The size of the replica must be at least as large - as the node size for the database cluster from which it is replicating. - "status": "str", # Optional. A string representing the current status of the - database cluster. Known values are: "creating", "online", "resizing", - "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the cluster, - in MiB. If null, no additional storage is added to the cluster, beyond what is - provided as a base amount from the 'size' and any previously added additional - storage. - "tags": [ - "str" # Optional. A flat array of tag names as strings to apply to - the read-only replica after it is created. Tag names can either be existing - or new tags. :code:`
`:code:`
`Requires ``tag:create`` scope. - ] + "mysql_settings": { + "auth_plugin": "str" # A string specifying the authentication method + to be used for connections to the MySQL user account. The valid values are + ``mysql_native_password`` or ``caching_sha2_password``. If excluded when + creating a new user, the default for the version of MySQL in use will be + used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required. + Known values are: "mysql_native_password" and "caching_sha2_password". + } } - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "replica": { - "name": "str", # The name to give the read-only replicating. - Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the - database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database replica. - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the read-only replica will be assigned. If excluded, - the replica will be assigned to your account's default VPC for the region. - :code:`
`:code:`
`Requires ``vpc:read`` scope. - "region": "str", # Optional. A slug identifier for the region where - the read-only replica will be located. If excluded, the replica will be - placed in the same region as the cluster. - "size": "str", # Optional. A slug identifier representing the size - of the node for the read-only replica. The size of the replica must be at - least as large as the node size for the database cluster from which it is - replicating. - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the - cluster, in MiB. If null, no additional storage is added to the cluster, - beyond what is provided as a base amount from the 'size' and any previously - added additional storage. - "tags": [ - "str" # Optional. A flat array of tag names as strings - applied to the read-only replica.:code:`
`:code:`
`Requires - ``tag:read`` scope. - ] + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. + } } } # response body for status code(s): 404 @@ -110314,31 +118515,33 @@ async def create_replica( """ @overload - async def create_replica( + async def reset_auth( self, database_cluster_uuid: str, - body: Optional[IO[bytes]] = None, + username: str, + body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create a Read-only Replica. - - To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request - to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of - the node to be used, and the region where it will be located. + """Reset a Database User's Password or Authentication Method. - **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + To reset the password for a database user, send a POST request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``. - The response will be a JSON object with a key called ``replica``. The value of this will be an - object that contains the standard attributes associated with a database replica. The initial - value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is - ready to receive traffic, this will transition to ``active``. + For ``mysql`` databases, the authentication method can be specifying by + including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin`` + value specified. + + The response will be a JSON object with a ``user`` key. 
This will be set to an + object containing the standard database user attributes. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Default value is None. + :param username: The name of the database user. Required. + :type username: str + :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". @@ -110350,85 +118553,81 @@ async def create_replica( Example: .. code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "replica": { - "name": "str", # The name to give the read-only replicating. - Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the - database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database replica. - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the read-only replica will be assigned. If excluded, - the replica will be assigned to your account's default VPC for the region. - :code:`
`:code:`
`Requires ``vpc:read`` scope. - "region": "str", # Optional. A slug identifier for the region where - the read-only replica will be located. If excluded, the replica will be - placed in the same region as the cluster. - "size": "str", # Optional. A slug identifier representing the size - of the node for the read-only replica. The size of the replica must be at - least as large as the node size for the database cluster from which it is - replicating. - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the - cluster, in MiB. If null, no additional storage is added to the cluster, - beyond what is provided as a base amount from the 'size' and any previously - added additional storage. - "tags": [ - "str" # Optional. A flat array of tag names as strings - applied to the read-only replica.:code:`
`:code:`
`Requires - ``tag:read`` scope. - ] + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. + } } } # response body for status code(s): 404 @@ -110445,29 +118644,31 @@ async def create_replica( """ @distributed_trace_async - async def create_replica( + async def reset_auth( self, database_cluster_uuid: str, - body: Optional[Union[JSON, IO[bytes]]] = None, + username: str, + body: Union[JSON, IO[bytes]], **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create a Read-only Replica. + """Reset a Database User's Password or Authentication Method. - To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request - to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of - the node to be used, and the region where it will be located. + To reset the password for a database user, send a POST request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``. - **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + For ``mysql`` databases, the authentication method can be specifying by + including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin`` + value specified. - The response will be a JSON object with a key called ``replica``. The value of this will be an - object that contains the standard attributes associated with a database replica. The initial - value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is - ready to receive traffic, this will transition to ``active``. + The response will be a JSON object with a ``user`` key. 
This will be set to an + object containing the standard database user attributes. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :param username: The name of the database user. Required. + :type username: str + :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON @@ -110478,159 +118679,91 @@ async def create_replica( # JSON input template you can fill out and use as your body input. body = { - "name": "str", # The name to give the read-only replicating. Required. - "connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given in - ISO8601 combined date and time format that represents when the database cluster - was created. - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the database - cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify and - reference a database replica. - "private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_network_uuid": "str", # Optional. A string specifying the UUID of - the VPC to which the read-only replica will be assigned. If excluded, the replica - will be assigned to your account's default VPC for the region. - :code:`
`:code:`
`Requires ``vpc:read`` scope. - "region": "str", # Optional. A slug identifier for the region where the - read-only replica will be located. If excluded, the replica will be placed in the - same region as the cluster. - "size": "str", # Optional. A slug identifier representing the size of the - node for the read-only replica. The size of the replica must be at least as large - as the node size for the database cluster from which it is replicating. - "status": "str", # Optional. A string representing the current status of the - database cluster. Known values are: "creating", "online", "resizing", - "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the cluster, - in MiB. If null, no additional storage is added to the cluster, beyond what is - provided as a base amount from the 'size' and any previously added additional - storage. - "tags": [ - "str" # Optional. A flat array of tag names as strings to apply to - the read-only replica after it is created. Tag names can either be existing - or new tags. :code:`
`:code:`
`Requires ``tag:create`` scope. - ] + "mysql_settings": { + "auth_plugin": "str" # A string specifying the authentication method + to be used for connections to the MySQL user account. The valid values are + ``mysql_native_password`` or ``caching_sha2_password``. If excluded when + creating a new user, the default for the version of MySQL in use will be + used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required. + Known values are: "mysql_native_password" and "caching_sha2_password". + } } - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "replica": { - "name": "str", # The name to give the read-only replicating. - Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the - database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database replica. - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the read-only replica will be assigned. If excluded, - the replica will be assigned to your account's default VPC for the region. - :code:`
`:code:`
`Requires ``vpc:read`` scope. - "region": "str", # Optional. A slug identifier for the region where - the read-only replica will be located. If excluded, the replica will be - placed in the same region as the cluster. - "size": "str", # Optional. A slug identifier representing the size - of the node for the read-only replica. The size of the replica must be at - least as large as the node size for the database cluster from which it is - replicating. - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the - cluster, in MiB. If null, no additional storage is added to the cluster, - beyond what is provided as a base amount from the 'size' and any previously - added additional storage. - "tags": [ - "str" # Optional. A flat array of tag names as strings - applied to the read-only replica.:code:`
`:code:`
`Requires - ``tag:read`` scope. - ] + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. + } } } # response body for status code(s): 404 @@ -110672,13 +118805,11 @@ async def create_replica( if isinstance(body, (IOBase, bytes)): _content = body else: - if body is not None: - _json = body - else: - _json = None + _json = body - _request = build_databases_create_replica_request( + _request = build_databases_reset_auth_request( database_cluster_uuid=database_cluster_uuid, + username=username, content_type=content_type, json=_json, content=_content, @@ -110696,14 +118827,14 @@ async def create_replica( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 201: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -110741,14 +118872,17 @@ async def create_replica( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + async def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List all Events Logs. + """List All Databases. - To list all of the cluster events, send a GET request to - ``/v2/databases/$DATABASE_ID/events``. 
+ To list all of the databases in a clusters, send a GET request to + ``/v2/databases/$DATABASE_ID/dbs``. - The result will be a JSON object with a ``events`` key. + The result will be a JSON object with a ``dbs`` key. This will be set to an array + of database objects, each of which will contain the standard database attributes. + + Note: Database management is not supported for Caching or Valkey clusters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -110761,16 +118895,9 @@ async def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> J # response body for status code(s): 200 response == { - "events": [ + "dbs": [ { - "cluster_name": "str", # Optional. The name of cluster. - "create_time": "str", # Optional. The time of the generation - of a event. - "event_type": "str", # Optional. Type of the event. Known - values are: "cluster_maintenance_perform", "cluster_master_promotion", - "cluster_create", "cluster_update", "cluster_delete", "cluster_poweron", - and "cluster_poweroff". - "id": "str" # Optional. ID of the particular event. + "name": "str" # The name of the database. Required. } ] } @@ -110804,7 +118931,7 @@ async def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> J cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_events_logs_request( + _request = build_databases_list_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -110864,25 +118991,136 @@ async def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> J return cast(JSON, deserialized) # type: ignore + @overload + async def add( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Add a New Database. 
+ + To add a new database to an existing cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/dbs``. + + Note: Database management is not supported for Caching or Valkey clusters. + + The response will be a JSON object with a key called ``db``. The value of this will be + an object that contains the standard attributes associated with a database. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str" # The name of the database. Required. + } + + # response body for status code(s): 201 + response == { + "db": { + "name": "str" # The name of the database. Required. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def add( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Add a New Database. 
+ + To add a new database to an existing cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/dbs``. + + Note: Database management is not supported for Caching or Valkey clusters. + + The response will be a JSON object with a key called ``db``. The value of this will be + an object that contains the standard attributes associated with a database. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "db": { + "name": "str" # The name of the database. Required. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + @distributed_trace_async - async def get_replica( - self, database_cluster_uuid: str, replica_name: str, **kwargs: Any + async def add( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Retrieve an Existing Read-only Replica. + """Add a New Database. 
- To show information about an existing database replica, send a GET request to - ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME``. + To add a new database to an existing cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/dbs``. - **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + Note: Database management is not supported for Caching or Valkey clusters. - The response will be a JSON object with a ``replica key``. This will be set to an object - containing the standard database replica attributes. + The response will be a JSON object with a key called ``db``. The value of this will be + an object that contains the standard attributes associated with a database. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param replica_name: The name of the database replica. Required. - :type replica_name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -110890,85 +119128,15 @@ async def get_replica( Example: .. code-block:: python - # response body for status code(s): 200 + # JSON input template you can fill out and use as your body input. + body = { + "name": "str" # The name of the database. Required. + } + + # response body for status code(s): 201 response == { - "replica": { - "name": "str", # The name to give the read-only replicating. - Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the - database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database replica. - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the read-only replica will be assigned. If excluded, - the replica will be assigned to your account's default VPC for the region. - :code:`
`:code:`
`Requires ``vpc:read`` scope. - "region": "str", # Optional. A slug identifier for the region where - the read-only replica will be located. If excluded, the replica will be - placed in the same region as the cluster. - "size": "str", # Optional. A slug identifier representing the size - of the node for the read-only replica. The size of the replica must be at - least as large as the node size for the database cluster from which it is - replicating. - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the - cluster, in MiB. If null, no additional storage is added to the cluster, - beyond what is provided as a base amount from the 'size' and any previously - added additional storage. - "tags": [ - "str" # Optional. A flat array of tag names as strings - applied to the read-only replica.:code:`
`:code:`
`Requires - ``tag:read`` scope. - ] + "db": { + "name": "str" # The name of the database. Required. } } # response body for status code(s): 404 @@ -110996,14 +119164,27 @@ async def get_replica( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_replica_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_add_request( database_cluster_uuid=database_cluster_uuid, - replica_name=replica_name, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -111018,14 +119199,14 @@ async def get_replica( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [201, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -111063,31 +119244,37 @@ async def get_replica( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def destroy_replica( - self, database_cluster_uuid: str, replica_name: str, **kwargs: Any - ) -> Optional[JSON]: + async def get( + self, database_cluster_uuid: str, database_name: str, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Destroy a Read-only Replica. 
+ """Retrieve an Existing Database. - To destroy a specific read-only replica, send a DELETE request to - ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME``. + To show information about an existing database cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/dbs/$DB_NAME``. - **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + Note: Database management is not supported for Caching or Valkey clusters. - A status of 204 will be given. This indicates that the request was processed successfully, but - that no response body is needed. + The response will be a JSON object with a ``db`` key. This will be set to an object + containing the standard database attributes. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param replica_name: The name of the database replica. Required. - :type replica_name: str - :return: JSON object or None - :rtype: JSON or None + :param database_name: The name of the database. Required. + :type database_name: str + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 200 + response == { + "db": { + "name": "str" # The name of the database. Required. 
+ } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -111116,11 +119303,11 @@ async def destroy_replica( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_destroy_replica_request( + _request = build_databases_get_request( database_cluster_uuid=database_cluster_uuid, - replica_name=replica_name, + database_name=database_name, headers=_headers, params=_params, ) @@ -111135,15 +119322,14 @@ async def destroy_replica( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -111154,6 +119340,11 @@ async def destroy_replica( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -111171,29 +119362,29 @@ async def destroy_replica( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def promote_replica( - self, database_cluster_uuid: str, 
replica_name: str, **kwargs: Any + async def delete( + self, database_cluster_uuid: str, database_name: str, **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Promote a Read-only Replica to become a Primary Cluster. + """Delete a Database. - To promote a specific read-only replica, send a PUT request to - ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME/promote``. + To delete a specific database, send a DELETE request to + ``/v2/databases/$DATABASE_ID/dbs/$DB_NAME``. - **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + A status of 204 will be given. This indicates that the request was processed + successfully, but that no response body is needed. - A status of 204 will be given. This indicates that the request was processed successfully, but - that no response body is needed. + Note: Database management is not supported for Caching or Valkey clusters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param replica_name: The name of the database replica. Required. - :type replica_name: str + :param database_name: The name of the database. Required. 
+ :type database_name: str :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -111231,9 +119422,9 @@ async def promote_replica( cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_promote_replica_request( + _request = build_databases_delete_request( database_cluster_uuid=database_cluster_uuid, - replica_name=replica_name, + database_name=database_name, headers=_headers, params=_params, ) @@ -111289,23 +119480,16 @@ async def promote_replica( return deserialized # type: ignore @distributed_trace_async - async def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + async def list_connection_pools( + self, database_cluster_uuid: str, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """List all Database Users. - - To list all of the users for your database cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/users``. - - Note: User management is not supported for Caching or Valkey clusters. - - The result will be a JSON object with a ``users`` key. This will be set to an array - of database user objects, each of which will contain the standard database user attributes. - User passwords will not show without the ``database:view_credentials`` scope. - - For MySQL clusters, additional options will be contained in the mysql_settings object. + """List Connection Pools (PostgreSQL). - For MongoDB clusters, additional information will be contained in the mongo_user_settings - object. + To list all of the connection pools available to a PostgreSQL database cluster, send a GET + request to ``/v2/databases/$DATABASE_ID/pools``. + The result will be a JSON object with a ``pools`` key. This will be set to an array of + connection pool objects. :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
:type database_cluster_uuid: str @@ -111318,86 +119502,109 @@ async def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "users": [ + "pools": [ { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS - client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL - 8.0, the default is ``caching_sha2_password``. Required. Known values - are: "mysql_native_password" and "caching_sha2_password". + "db": "str", # The database for use with the connection + pool. Required. + "mode": "str", # The PGBouncer transaction mode for the + connection pool. The allowed values are session, transaction, and + statement. Required. + "name": "str", # A unique name for the connection pool. Must + be between 3 and 60 characters. Required. + "size": 0, # The desired size of the PGBouncer connection + pool. The maximum allowed size is determined by the size of the cluster's + primary node. 25 backend server connections are allowed for every 1GB of + RAM. Three are reserved for maintenance. For example, a primary node with + 1 GB of RAM allows for a maximum of 22 backend server connections while + one with 4 GB would allow for 97. Note that these are shared across all + connection pools in a cluster. Required. + "connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. 
The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. }, - "password": "str", # Optional. A randomly generated password - for the database user.:code:`
`Requires ``database:view_credentials`` - scope. - "role": "str", # Optional. A string representing the - database user's role. The value will be either "primary" or "normal". - Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission - set applied to the ACL. 'consume' allows for messages to be - consumed from the topic. 'produce' allows for messages to be - published to the topic. 'produceconsume' allows for both - 'consume' and 'produce' permission. 'admin' allows for - 'produceconsume' as well as any operations to administer the - topic (delete, update). Required. Known values are: "admin", - "consume", "produce", and "produceconsume". - "topic": "str", # A regex for - matching the topic(s) that this ACL should apply to. - Required. - "id": "str" # Optional. An - identifier for the ACL. Will be computed after the ACL is - created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of - databases to which the user should have access. When the - database is set to ``admin``"" , the user will have access to - all databases based on the user's role i.e. a user with the - role ``readOnly`` assigned to the ``admin`` database will - have read access to all databases. - ], - "role": "str" # Optional. The role to assign - to the user with each role mapping to a MongoDB built-in role. - ``readOnly`` maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex - for matching the indexes that this ACL should apply to. - "permission": "str" # Optional. - Permission set applied to the ACL. 'read' allows user to read - from the index. 'write' allows for user to write to the - index. 'readwrite' allows for both 'read' and 'write' - permission. 
'deny'(default) restricts user from performing - any operation over an index. 'admin' allows for 'readwrite' - as well as any operations to administer the index. Known - values are: "deny", "admin", "read", "readwrite", and - "write". - } - ], - "pg_allow_replication": bool # Optional. For - Postgres clusters, set to ``true`` for a user with replication - rights. This option is not currently supported for other database - engines. - } + "private_connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "standby_connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "user": "str" # Optional. The name of the user for use with + the connection pool. When excluded, all sessions connect to the database + as the inbound user. } ] } @@ -111431,7 +119638,7 @@ async def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_users_request( + _request = build_databases_list_connection_pools_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -111492,7 +119699,7 @@ async def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @overload - async def add_user( + async def add_connection_pool( self, database_cluster_uuid: str, body: JSON, @@ -111501,192 +119708,218 @@ async def add_user( **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Add a Database User. - - To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users`` - with the desired username. - - Note: User management is not supported for Caching or Valkey clusters. - - When adding a user to a MySQL cluster, additional options can be configured in the - ``mysql_settings`` object. - - When adding a user to a Kafka cluster, additional options can be configured in - the ``settings`` object. + """Add a New Connection Pool (PostgreSQL). - When adding a user to a MongoDB cluster, additional options can be configured in - the ``settings.mongo_user_settings`` object. + For PostgreSQL database clusters, connection pools can be used to allow a + database to share its idle connections. The popular PostgreSQL connection + pooling utility PgBouncer is used to provide this service. `See here for more information + `_ + about how and why to use PgBouncer connection pooling including + details about the available transaction modes. - The response will be a JSON object with a key called ``user``. 
The value of this will be an - object that contains the standard attributes associated with a database user including - its randomly generated password. + To add a new connection pool to a PostgreSQL database cluster, send a POST + request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool, + the user to connect with, the database to connect to, as well as its desired + size and transaction mode. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client authentication. - (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the authentication method - to be used for connections to the MySQL user account. The valid values are - ``mysql_native_password`` or ``caching_sha2_password``. If excluded when - creating a new user, the default for the version of MySQL in use will be - used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required. - Known values are: "mysql_native_password" and "caching_sha2_password". + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. 
+ body = { + "db": "str", # The database for use with the connection pool. Required. + "mode": "str", # The PGBouncer transaction mode for the connection pool. The + allowed values are session, transaction, and statement. Required. + "name": "str", # A unique name for the connection pool. Must be between 3 + and 60 characters. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The maximum + allowed size is determined by the size of the cluster's primary node. 25 backend + server connections are allowed for every 1GB of RAM. Three are reserved for + maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of + 22 backend server connections while one with 4 GB would allow for 97. Note that + these are shared across all connection pools in a cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "readonly": bool, # Optional. (To be deprecated: use - settings.mongo_user_settings.role instead for access controls to MongoDB - databases). For MongoDB clusters, set to ``true`` to create a read-only user. - This option is not currently supported for other database engines. - "role": "str", # Optional. A string representing the database user's role. - The value will be either "primary" or "normal". Known values are: "primary" and - "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set applied to the - ACL. 'consume' allows for messages to be consumed from the topic. - 'produce' allows for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' permission. - 'admin' allows for 'produceconsume' as well as any operations to - administer the topic (delete, update). Required. Known values are: - "admin", "consume", "produce", and "produceconsume". - "topic": "str", # A regex for matching the topic(s) - that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for the ACL. - Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to which the - user should have access. When the database is set to ``admin``"" , - the user will have access to all databases based on the user's role - i.e. a user with the role ``readOnly`` assigned to the ``admin`` - database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the user - with each role mapping to a MongoDB built-in role. ``readOnly`` maps to - a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for matching the - indexes that this ACL should apply to. 
- "permission": "str" # Optional. Permission set - applied to the ACL. 'read' allows user to read from the index. - 'write' allows for user to write to the index. 'readwrite' allows for - both 'read' and 'write' permission. 'deny'(default) restricts user - from performing any operation over an index. 'admin' allows for - 'readwrite' as well as any operations to administer the index. Known - values are: "deny", "admin", "read", "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres clusters, set - to ``true`` for a user with replication rights. This option is not currently - supported for other database engines. - } + "private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "user": "str" # Optional. The name of the user for use with the connection + pool. When excluded, all sessions connect to the database as the inbound user. } # response body for status code(s): 201 response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". + "pool": { + "db": "str", # The database for use with the connection pool. + Required. + "mode": "str", # The PGBouncer transaction mode for the connection + pool. The allowed values are session, transaction, and statement. Required. + "name": "str", # A unique name for the connection pool. Must be + between 3 and 60 characters. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The + maximum allowed size is determined by the size of the cluster's primary node. + 25 backend server connections are allowed for every 1GB of RAM. Three are + reserved for maintenance. For example, a primary node with 1 GB of RAM allows + for a maximum of 22 backend server connections while one with 4 GB would + allow for 97. Note that these are shared across all connection pools in a + cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. 
The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "user": "str" # Optional. The name of the user for use with the + connection pool. When excluded, all sessions connect to the database as the + inbound user. } } # response body for status code(s): 404 @@ -111703,7 +119936,7 @@ async def add_user( """ @overload - async def add_user( + async def add_connection_pool( self, database_cluster_uuid: str, body: IO[bytes], @@ -111712,25 +119945,19 @@ async def add_user( **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Add a Database User. - - To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users`` - with the desired username. - - Note: User management is not supported for Caching or Valkey clusters. - - When adding a user to a MySQL cluster, additional options can be configured in the - ``mysql_settings`` object. - - When adding a user to a Kafka cluster, additional options can be configured in - the ``settings`` object. + """Add a New Connection Pool (PostgreSQL). - When adding a user to a MongoDB cluster, additional options can be configured in - the ``settings.mongo_user_settings`` object. + For PostgreSQL database clusters, connection pools can be used to allow a + database to share its idle connections. The popular PostgreSQL connection + pooling utility PgBouncer is used to provide this service. `See here for more information + `_ + about how and why to use PgBouncer connection pooling including + details about the available transaction modes. - The response will be a JSON object with a key called ``user``. The value of this will be an - object that contains the standard attributes associated with a database user including - its randomly generated password. 
+ To add a new connection pool to a PostgreSQL database cluster, send a POST + request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool, + the user to connect with, the database to connect to, as well as its desired + size and transaction mode. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -111748,79 +119975,99 @@ async def add_user( # response body for status code(s): 201 response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". + "pool": { + "db": "str", # The database for use with the connection pool. + Required. + "mode": "str", # The PGBouncer transaction mode for the connection + pool. The allowed values are session, transaction, and statement. Required. + "name": "str", # A unique name for the connection pool. Must be + between 3 and 60 characters. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The + maximum allowed size is determined by the size of the cluster's primary node. + 25 backend server connections are allowed for every 1GB of RAM. Three are + reserved for maintenance. For example, a primary node with 1 GB of RAM allows + for a maximum of 22 backend server connections while one with 4 GB would + allow for 97. 
Note that these are shared across all connection pools in a + cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "user": "str" # Optional. The name of the user for use with the + connection pool. When excluded, all sessions connect to the database as the + inbound user. } } # response body for status code(s): 404 @@ -111837,29 +120084,23 @@ async def add_user( """ @distributed_trace_async - async def add_user( + async def add_connection_pool( self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Add a Database User. - - To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users`` - with the desired username. - - Note: User management is not supported for Caching or Valkey clusters. - - When adding a user to a MySQL cluster, additional options can be configured in the - ``mysql_settings`` object. - - When adding a user to a Kafka cluster, additional options can be configured in - the ``settings`` object. + """Add a New Connection Pool (PostgreSQL). - When adding a user to a MongoDB cluster, additional options can be configured in - the ``settings.mongo_user_settings`` object. + For PostgreSQL database clusters, connection pools can be used to allow a + database to share its idle connections. The popular PostgreSQL connection + pooling utility PgBouncer is used to provide this service. `See here for more information + `_ + about how and why to use PgBouncer connection pooling including + details about the available transaction modes. - The response will be a JSON object with a key called ``user``. The value of this will be an - object that contains the standard attributes associated with a database user including - its randomly generated password. + To add a new connection pool to a PostgreSQL database cluster, send a POST + request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool, + the user to connect with, the database to connect to, as well as its desired + size and transaction mode. 
:param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -111874,156 +120115,188 @@ async def add_user( # JSON input template you can fill out and use as your body input. body = { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client authentication. - (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the authentication method - to be used for connections to the MySQL user account. The valid values are - ``mysql_native_password`` or ``caching_sha2_password``. If excluded when - creating a new user, the default for the version of MySQL in use will be - used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required. - Known values are: "mysql_native_password" and "caching_sha2_password". + "db": "str", # The database for use with the connection pool. Required. + "mode": "str", # The PGBouncer transaction mode for the connection pool. The + allowed values are session, transaction, and statement. Required. + "name": "str", # A unique name for the connection pool. Must be between 3 + and 60 characters. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The maximum + allowed size is determined by the size of the cluster's primary node. 25 backend + server connections are allowed for every 1GB of RAM. Three are reserved for + maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of + 22 backend server connections while one with 4 GB would allow for 97. Note that + these are shared across all connection pools in a cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. 
The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "readonly": bool, # Optional. (To be deprecated: use - settings.mongo_user_settings.role instead for access controls to MongoDB - databases). For MongoDB clusters, set to ``true`` to create a read-only user. - This option is not currently supported for other database engines. - "role": "str", # Optional. A string representing the database user's role. - The value will be either "primary" or "normal". Known values are: "primary" and - "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set applied to the - ACL. 'consume' allows for messages to be consumed from the topic. - 'produce' allows for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' permission. - 'admin' allows for 'produceconsume' as well as any operations to - administer the topic (delete, update). Required. Known values are: - "admin", "consume", "produce", and "produceconsume". - "topic": "str", # A regex for matching the topic(s) - that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for the ACL. - Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to which the - user should have access. When the database is set to ``admin``"" , - the user will have access to all databases based on the user's role - i.e. a user with the role ``readOnly`` assigned to the ``admin`` - database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the user - with each role mapping to a MongoDB built-in role. ``readOnly`` maps to - a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for matching the - indexes that this ACL should apply to. 
- "permission": "str" # Optional. Permission set - applied to the ACL. 'read' allows user to read from the index. - 'write' allows for user to write to the index. 'readwrite' allows for - both 'read' and 'write' permission. 'deny'(default) restricts user - from performing any operation over an index. 'admin' allows for - 'readwrite' as well as any operations to administer the index. Known - values are: "deny", "admin", "read", "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres clusters, set - to ``true`` for a user with replication rights. This option is not currently - supported for other database engines. - } + "private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "user": "str" # Optional. The name of the user for use with the connection + pool. When excluded, all sessions connect to the database as the inbound user. } # response body for status code(s): 201 response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". + "pool": { + "db": "str", # The database for use with the connection pool. + Required. + "mode": "str", # The PGBouncer transaction mode for the connection + pool. The allowed values are session, transaction, and statement. Required. + "name": "str", # A unique name for the connection pool. Must be + between 3 and 60 characters. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The + maximum allowed size is determined by the size of the cluster's primary node. + 25 backend server connections are allowed for every 1GB of RAM. Three are + reserved for maintenance. For example, a primary node with 1 GB of RAM allows + for a maximum of 22 backend server connections while one with 4 GB would + allow for 97. Note that these are shared across all connection pools in a + cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. 
The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "user": "str" # Optional. The name of the user for use with the + connection pool. When excluded, all sessions connect to the database as the + inbound user. } } # response body for status code(s): 404 @@ -112067,7 +120340,7 @@ async def add_user( else: _json = body - _request = build_databases_add_user_request( + _request = build_databases_add_connection_pool_request( database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, @@ -112131,33 +120404,20 @@ async def add_user( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_user( - self, database_cluster_uuid: str, username: str, **kwargs: Any + async def get_connection_pool( + self, database_cluster_uuid: str, pool_name: str, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Retrieve an Existing Database User. - - To show information about an existing database user, send a GET request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME``. - - Note: User management is not supported for Caching or Valkey clusters. - - The response will be a JSON object with a ``user`` key. This will be set to an object - containing the standard database user attributes. The user's password will not show - up unless the ``database:view_credentials`` scope is present. - - For MySQL clusters, additional options will be contained in the ``mysql_settings`` - object. - - For Kafka clusters, additional options will be contained in the ``settings`` object. + """Retrieve Existing Connection Pool (PostgreSQL). - For MongoDB clusters, additional information will be contained in the mongo_user_settings - object. + To show information about an existing connection pool for a PostgreSQL database cluster, send a + GET request to ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. + The response will be a JSON object with a ``pool`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
:type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str + :param pool_name: The name used to identify the connection pool. Required. + :type pool_name: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -112167,79 +120427,99 @@ async def get_user( # response body for status code(s): 200 response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". + "pool": { + "db": "str", # The database for use with the connection pool. + Required. + "mode": "str", # The PGBouncer transaction mode for the connection + pool. The allowed values are session, transaction, and statement. Required. + "name": "str", # A unique name for the connection pool. Must be + between 3 and 60 characters. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The + maximum allowed size is determined by the size of the cluster's primary node. + 25 backend server connections are allowed for every 1GB of RAM. Three are + reserved for maintenance. For example, a primary node with 1 GB of RAM allows + for a maximum of 22 backend server connections while one with 4 GB would + allow for 97. Note that these are shared across all connection pools in a + cluster. Required. 
+ "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "user": "str" # Optional. The name of the user for use with the + connection pool. When excluded, all sessions connect to the database as the + inbound user. } } # response body for status code(s): 404 @@ -112272,9 +120552,9 @@ async def get_user( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_user_request( + _request = build_databases_get_connection_pool_request( database_cluster_uuid=database_cluster_uuid, - username=username, + pool_name=pool_name, headers=_headers, params=_params, ) @@ -112333,25 +120613,131 @@ async def get_user( return cast(JSON, deserialized) # type: ignore - @distributed_trace_async - async def delete_user( - self, database_cluster_uuid: str, username: str, **kwargs: Any + @overload + async def update_connection_pool( + self, + database_cluster_uuid: str, + pool_name: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Remove a Database User. + """Update Connection Pools (PostgreSQL). - To remove a specific database user, send a DELETE request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME``. + To update a connection pool for a PostgreSQL database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. - A status of 204 will be given. This indicates that the request was processed - successfully, but that no response body is needed. + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param pool_name: The name used to identify the connection pool. Required. + :type pool_name: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: - Note: User management is not supported for Caching or Valkey clusters. + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "db": "str", # The database for use with the connection pool. Required. + "mode": "str", # The PGBouncer transaction mode for the connection pool. The + allowed values are session, transaction, and statement. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The maximum + allowed size is determined by the size of the cluster's primary node. 25 backend + server connections are allowed for every 1GB of RAM. Three are reserved for + maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of + 22 backend server connections while one with 4 GB would allow for 97. Note that + these are shared across all connection pools in a cluster. Required. + "user": "str" # Optional. The name of the user for use with the connection + pool. When excluded, all sessions connect to the database as the inbound user. + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @overload + async def update_connection_pool( + self, + database_cluster_uuid: str, + pool_name: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Update Connection Pools (PostgreSQL). + + To update a connection pool for a PostgreSQL database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str + :param pool_name: The name used to identify the connection pool. Required. + :type pool_name: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def update_connection_pool( + self, + database_cluster_uuid: str, + pool_name: str, + body: Union[JSON, IO[bytes]], + **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Update Connection Pools (PostgreSQL). 
+ + To update a connection pool for a PostgreSQL database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param pool_name: The name used to identify the connection pool. Required. + :type pool_name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -112359,6 +120745,21 @@ async def delete_user( Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "db": "str", # The database for use with the connection pool. Required. + "mode": "str", # The PGBouncer transaction mode for the connection pool. The + allowed values are session, transaction, and statement. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The maximum + allowed size is determined by the size of the cluster's primary node. 25 backend + server connections are allowed for every 1GB of RAM. Three are reserved for + maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of + 22 backend server connections while one with 4 GB would allow for 97. Note that + these are shared across all connection pools in a cluster. Required. + "user": "str" # Optional. The name of the user for use with the connection + pool. When excluded, all sessions connect to the database as the inbound user. 
+ } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -112384,14 +120785,28 @@ async def delete_user( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_delete_user_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_update_connection_pool_request( database_cluster_uuid=database_cluster_uuid, - username=username, + pool_name=pool_name, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -112446,180 +120861,30 @@ async def delete_user( return deserialized # type: ignore - @overload - async def update_user( - self, - database_cluster_uuid: str, - username: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: + @distributed_trace_async + async def delete_connection_pool( + self, database_cluster_uuid: str, pool_name: str, **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Update a Database User. - - To update an existing database user, send a PUT request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME`` - with the desired settings. + """Delete a Connection Pool (PostgreSQL). - **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change - the name of a user, - you must recreate a new user. + To delete a specific connection pool for a PostgreSQL database cluster, send + a DELETE request to ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. 
- The response will be a JSON object with a key called ``user``. The value of this will be an - object that contains the name of the update database user, along with the ``settings`` object - that - has been updated. + A status of 204 will be given. This indicates that the request was processed + successfully, but that no response body is needed. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON + :param pool_name: The name used to identify the connection pool. Required. + :type pool_name: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "settings": { - "acl": [ - { - "permission": "str", # Permission set applied to the - ACL. 'consume' allows for messages to be consumed from the topic. - 'produce' allows for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' permission. - 'admin' allows for 'produceconsume' as well as any operations to - administer the topic (delete, update). Required. Known values are: - "admin", "consume", "produce", and "produceconsume". - "topic": "str", # A regex for matching the topic(s) - that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for the ACL. - Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to which the - user should have access. 
When the database is set to ``admin``"" , - the user will have access to all databases based on the user's role - i.e. a user with the role ``readOnly`` assigned to the ``admin`` - database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the user - with each role mapping to a MongoDB built-in role. ``readOnly`` maps to - a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for matching the - indexes that this ACL should apply to. - "permission": "str" # Optional. Permission set - applied to the ACL. 'read' allows user to read from the index. - 'write' allows for user to write to the index. 'readwrite' allows for - both 'read' and 'write' permission. 'deny'(default) restricts user - from performing any operation over an index. 'admin' allows for - 'readwrite' as well as any operations to administer the index. Known - values are: "deny", "admin", "read", "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres clusters, set - to ``true`` for a user with replication rights. This option is not currently - supported for other database engines. - } - } - - # response body for status code(s): 201 - response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. 
As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -112632,169 +120897,95 @@ async def update_user( tickets to help identify the issue. } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @overload - async def update_user( - self, - database_cluster_uuid: str, - username: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Update a Database User. + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} - To update an existing database user, send a PUT request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME`` - with the desired settings. + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change - the name of a user, - you must recreate a new user. 
+ _request = build_databases_delete_connection_pool_request( + database_cluster_uuid=database_cluster_uuid, + pool_name=pool_name, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) - The response will be a JSON object with a key called ``user``. The value of this will be an - object that contains the name of the update database user, along with the ``settings`` object - that - has been updated. + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: + response = pipeline_response.http_response - Example: - .. code-block:: python + if response.status_code not in [204, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) - # response body for status code(s): 201 - response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. 
The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore @distributed_trace_async - async def update_user( - self, - database_cluster_uuid: str, - username: str, - body: Union[JSON, IO[bytes]], - **kwargs: Any + async def get_eviction_policy( + self, database_cluster_uuid: str, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update a Database User. - - To update an existing database user, send a PUT request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME`` - with the desired settings. - - **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change - the name of a user, - you must recreate a new user. + """Retrieve the Eviction Policy for a Caching or Valkey Cluster. - The response will be a JSON object with a key called ``user``. The value of this will be an - object that contains the name of the update database user, along with the ``settings`` object - that - has been updated. 
+ To retrieve the configured eviction policy for an existing Caching or Valkey cluster, send a + GET request to ``/v2/databases/$DATABASE_ID/eviction_policy``. + The response will be a JSON object with an ``eviction_policy`` key. This will be set to a + string representing the eviction policy. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -112802,137 +120993,18 @@ async def update_user( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "settings": { - "acl": [ - { - "permission": "str", # Permission set applied to the - ACL. 'consume' allows for messages to be consumed from the topic. - 'produce' allows for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' permission. - 'admin' allows for 'produceconsume' as well as any operations to - administer the topic (delete, update). Required. Known values are: - "admin", "consume", "produce", and "produceconsume". - "topic": "str", # A regex for matching the topic(s) - that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for the ACL. - Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to which the - user should have access. When the database is set to ``admin``"" , - the user will have access to all databases based on the user's role - i.e. a user with the role ``readOnly`` assigned to the ``admin`` - database will have read access to all databases. - ], - "role": "str" # Optional. 
The role to assign to the user - with each role mapping to a MongoDB built-in role. ``readOnly`` maps to - a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for matching the - indexes that this ACL should apply to. - "permission": "str" # Optional. Permission set - applied to the ACL. 'read' allows user to read from the index. - 'write' allows for user to write to the index. 'readwrite' allows for - both 'read' and 'write' permission. 'deny'(default) restricts user - from performing any operation over an index. 'admin' allows for - 'readwrite' as well as any operations to administer the index. Known - values are: "deny", "admin", "read", "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres clusters, set - to ``true`` for a user with replication rights. This option is not currently - supported for other database engines. - } - } - - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. 
A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } - } + "eviction_policy": "str" # A string specifying the desired eviction policy + for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data, + returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key, + least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random + order. * ``volatile_lru``"" : Evict keys with expiration only, least recently + used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a + random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest + time-to-live (TTL) first. Required. Known values are: "noeviction", + "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and + "volatile_ttl". 
} # response body for status code(s): 404 response == { @@ -112959,28 +121031,13 @@ async def update_user( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[JSON] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body - - _request = build_databases_update_user_request( + _request = build_databases_get_eviction_policy_request( database_cluster_uuid=database_cluster_uuid, - username=username, - content_type=content_type, - json=_json, - content=_content, headers=_headers, params=_params, ) @@ -112995,14 +121052,14 @@ async def update_user( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 201: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -113040,39 +121097,29 @@ async def update_user( return cast(JSON, deserialized) # type: ignore @overload - async def reset_auth( + async def update_eviction_policy( self, database_cluster_uuid: str, - username: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Reset a Database User's Password or Authentication Method. 
- - To reset the password for a database user, send a POST request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``. - - For ``mysql`` databases, the authentication method can be specifying by - including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin`` - value specified. + """Configure the Eviction Policy for a Caching or Valkey Cluster. - The response will be a JSON object with a ``user`` key. This will be set to an - object containing the standard database user attributes. + To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request + to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -113080,93 +121127,18 @@ async def reset_auth( # JSON input template you can fill out and use as your body input. body = { - "mysql_settings": { - "auth_plugin": "str" # A string specifying the authentication method - to be used for connections to the MySQL user account. The valid values are - ``mysql_native_password`` or ``caching_sha2_password``. If excluded when - creating a new user, the default for the version of MySQL in use will be - used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required. - Known values are: "mysql_native_password" and "caching_sha2_password". - } + "eviction_policy": "str" # A string specifying the desired eviction policy + for a Caching or Valkey cluster. 
* ``noeviction``"" : Don't evict any data, + returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key, + least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random + order. * ``volatile_lru``"" : Evict keys with expiration only, least recently + used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a + random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest + time-to-live (TTL) first. Required. Known values are: "noeviction", + "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and + "volatile_ttl". } - # response body for status code(s): 200 - response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -113181,121 +121153,34 @@ async def reset_auth( """ @overload - async def reset_auth( + async def update_eviction_policy( self, database_cluster_uuid: str, - username: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Reset a Database User's Password or Authentication Method. - - To reset the password for a database user, send a POST request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``. - - For ``mysql`` databases, the authentication method can be specifying by - including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin`` - value specified. + """Configure the Eviction Policy for a Caching or Valkey Cluster. - The response will be a JSON object with a ``user`` key. This will be set to an - object containing the standard database user attributes. + To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request + to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. 
Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -113310,128 +121195,40 @@ async def reset_auth( """ @distributed_trace_async - async def reset_auth( - self, - database_cluster_uuid: str, - username: str, - body: Union[JSON, IO[bytes]], - **kwargs: Any - ) -> JSON: + async def update_eviction_policy( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Reset a Database User's Password or Authentication Method. - - To reset the password for a database user, send a POST request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``. - - For ``mysql`` databases, the authentication method can be specifying by - including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin`` - value specified. + """Configure the Eviction Policy for a Caching or Valkey Cluster. - The response will be a JSON object with a ``user`` key. This will be set to an - object containing the standard database user attributes. + To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request + to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str :param body: Is either a JSON type or a IO[bytes] type. Required. 
:type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "mysql_settings": { - "auth_plugin": "str" # A string specifying the authentication method - to be used for connections to the MySQL user account. The valid values are - ``mysql_native_password`` or ``caching_sha2_password``. If excluded when - creating a new user, the default for the version of MySQL in use will be - used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required. - Known values are: "mysql_native_password" and "caching_sha2_password". - } - } - - # response body for status code(s): 200 - response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } - } + # JSON input template you can fill out and use as your body input. + body = { + "eviction_policy": "str" # A string specifying the desired eviction policy + for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data, + returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key, + least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random + order. * ``volatile_lru``"" : Evict keys with expiration only, least recently + used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a + random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest + time-to-live (TTL) first. Required. Known values are: "noeviction", + "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and + "volatile_ttl". 
} + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -113463,7 +121260,7 @@ async def reset_auth( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -113473,9 +121270,8 @@ async def reset_auth( else: _json = body - _request = build_databases_reset_auth_request( + _request = build_databases_update_eviction_policy_request( database_cluster_uuid=database_cluster_uuid, - username=username, content_type=content_type, json=_json, content=_content, @@ -113493,14 +121289,15 @@ async def reset_auth( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -113511,11 +121308,6 @@ async def reset_auth( "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -113533,22 +121325,19 @@ async def reset_auth( deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return 
deserialized # type: ignore @distributed_trace_async - async def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List All Databases. - - To list all of the databases in a clusters, send a GET request to - ``/v2/databases/$DATABASE_ID/dbs``. - - The result will be a JSON object with a ``dbs`` key. This will be set to an array - of database objects, each of which will contain the standard database attributes. + """Retrieve the SQL Modes for a MySQL Cluster. - Note: Database management is not supported for Caching or Valkey clusters. + To retrieve the configured SQL modes for an existing MySQL cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/sql_mode``. + The response will be a JSON object with a ``sql_mode`` key. This will be set to a string + representing the configured SQL modes. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -113561,11 +121350,8 @@ async def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "dbs": [ - { - "name": "str" # The name of the database. Required. - } - ] + "sql_mode": "str" # A string specifying the configured SQL modes for the + MySQL cluster. Required. 
} # response body for status code(s): 404 response == { @@ -113597,7 +121383,7 @@ async def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_request( + _request = build_databases_get_sql_mode_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -113658,24 +121444,22 @@ async def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @overload - async def add( + async def update_sql_mode( self, database_cluster_uuid: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Add a New Database. - - To add a new database to an existing cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/dbs``. - - Note: Database management is not supported for Caching or Valkey clusters. + """Update SQL Mode for a Cluster. - The response will be a JSON object with a key called ``db``. The value of this will be - an object that contains the standard attributes associated with a database. + To configure the SQL modes for an existing MySQL cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 + documentation for a `full list of supported SQL modes + `_. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -113684,8 +121468,8 @@ async def add( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -113693,15 +121477,10 @@ async def add( # JSON input template you can fill out and use as your body input. body = { - "name": "str" # The name of the database. Required. + "sql_mode": "str" # A string specifying the configured SQL modes for the + MySQL cluster. Required. } - # response body for status code(s): 201 - response == { - "db": { - "name": "str" # The name of the database. Required. - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -113716,24 +121495,22 @@ async def add( """ @overload - async def add( + async def update_sql_mode( self, database_cluster_uuid: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Add a New Database. - - To add a new database to an existing cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/dbs``. - - Note: Database management is not supported for Caching or Valkey clusters. + """Update SQL Mode for a Cluster. - The response will be a JSON object with a key called ``db``. The value of this will be - an object that contains the standard attributes associated with a database. + To configure the SQL modes for an existing MySQL cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 + documentation for a `full list of supported SQL modes + `_. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -113742,19 +121519,13 @@ async def add( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. 
Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 201 - response == { - "db": { - "name": "str" # The name of the database. Required. - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -113769,26 +121540,24 @@ async def add( """ @distributed_trace_async - async def add( + async def update_sql_mode( self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Add a New Database. - - To add a new database to an existing cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/dbs``. - - Note: Database management is not supported for Caching or Valkey clusters. + """Update SQL Mode for a Cluster. - The response will be a JSON object with a key called ``db``. The value of this will be - an object that contains the standard attributes associated with a database. + To configure the SQL modes for an existing MySQL cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 + documentation for a `full list of supported SQL modes + `_. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -113796,15 +121565,10 @@ async def add( # JSON input template you can fill out and use as your body input. 
body = { - "name": "str" # The name of the database. Required. + "sql_mode": "str" # A string specifying the configured SQL modes for the + MySQL cluster. Required. } - # response body for status code(s): 201 - response == { - "db": { - "name": "str" # The name of the database. Required. - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -113836,7 +121600,7 @@ async def add( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -113846,7 +121610,7 @@ async def add( else: _json = body - _request = build_databases_add_request( + _request = build_databases_update_sql_mode_request( database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, @@ -113865,14 +121629,15 @@ async def add( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [204, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 201: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -113883,11 +121648,6 @@ async def add( "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -113905,42 +121665,46 @@ async def add( deserialized = None if cls: - return 
cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore - @distributed_trace_async - async def get( - self, database_cluster_uuid: str, database_name: str, **kwargs: Any - ) -> JSON: + @overload + async def update_major_version( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Retrieve an Existing Database. - - To show information about an existing database cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/dbs/$DB_NAME``. - - Note: Database management is not supported for Caching or Valkey clusters. + """Upgrade Major Version for a Database. - The response will be a JSON object with a ``db`` key. This will be set to an object - containing the standard database attributes. + To upgrade the major version of a database, send a PUT request to + ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param database_name: The name of the database. Required. - :type database_name: str - :return: JSON object - :rtype: JSON + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "db": { - "name": "str" # The name of the database. Required. 
- } + # JSON input template you can fill out and use as your body input. + body = { + "version": "str" # Optional. A string representing the version of the + database engine in use for the cluster. } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -113953,104 +121717,65 @@ async def get( tickets to help identify the issue. } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_databases_get_request( - database_cluster_uuid=database_cluster_uuid, - database_name=database_name, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - if response.status_code not in [200, 404]: - if _stream: - await response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - 
"int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None + @overload + async def update_major_version( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Upgrade Major Version for a Database. - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + To upgrade the major version of a database, send a PUT request to + ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version. + A successful request will receive a 204 No Content status code with no body in response. - if response.content: - deserialized = response.json() - else: - deserialized = None + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + Example: + .. code-block:: python - return cast(JSON, deserialized) # type: ignore + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. 
+ "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ @distributed_trace_async - async def delete( - self, database_cluster_uuid: str, database_name: str, **kwargs: Any + async def update_major_version( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Delete a Database. - - To delete a specific database, send a DELETE request to - ``/v2/databases/$DATABASE_ID/dbs/$DB_NAME``. - - A status of 204 will be given. This indicates that the request was processed - successfully, but that no response body is needed. + """Upgrade Major Version for a Database. - Note: Database management is not supported for Caching or Valkey clusters. + To upgrade the major version of a database, send a PUT request to + ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param database_name: The name of the database. Required. - :type database_name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -114058,6 +121783,12 @@ async def delete( Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "version": "str" # Optional. A string representing the version of the + database engine in use for the cluster. 
+ } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -114083,14 +121814,27 @@ async def delete( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_delete_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_update_major_version_request( database_cluster_uuid=database_cluster_uuid, - database_name=database_name, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -114146,16 +121890,13 @@ async def delete( return deserialized # type: ignore @distributed_trace_async - async def list_connection_pools( - self, database_cluster_uuid: str, **kwargs: Any - ) -> JSON: + async def get_autoscale(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List Connection Pools (PostgreSQL). + """Retrieve Autoscale Configuration for a Database Cluster. - To list all of the connection pools available to a PostgreSQL database cluster, send a GET - request to ``/v2/databases/$DATABASE_ID/pools``. - The result will be a JSON object with a ``pools`` key. This will be set to an array of - connection pool objects. + To retrieve the autoscale configuration for an existing database cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/autoscale``. + The response will be a JSON object with autoscaling configuration details. :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
:type database_cluster_uuid: str @@ -114168,111 +121909,17 @@ async def list_connection_pools( # response body for status code(s): 200 response == { - "pools": [ - { - "db": "str", # The database for use with the connection - pool. Required. - "mode": "str", # The PGBouncer transaction mode for the - connection pool. The allowed values are session, transaction, and - statement. Required. - "name": "str", # A unique name for the connection pool. Must - be between 3 and 60 characters. Required. - "size": 0, # The desired size of the PGBouncer connection - pool. The maximum allowed size is determined by the size of the cluster's - primary node. 25 backend server connections are allowed for every 1GB of - RAM. Three are reserved for maintenance. For example, a primary node with - 1 GB of RAM allows for a maximum of 22 backend server connections while - one with 4 GB would allow for 97. Note that these are shared across all - connection pools in a cluster. Required. - "connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "private_connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "standby_connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "user": "str" # Optional. The name of the user for use with - the connection pool. When excluded, all sessions connect to the database - as the inbound user. + "autoscale": { + "storage": { + "enabled": bool, # Whether storage autoscaling is enabled + for the cluster. Required. + "increment_gib": 0, # Optional. The amount of additional + storage to add (in GiB) when autoscaling is triggered. + "threshold_percent": 0 # Optional. The storage usage + threshold percentage that triggers autoscaling. When storage usage + exceeds this percentage, additional storage will be added automatically. } - ] + } } # response body for status code(s): 404 response == { @@ -114304,7 +121951,7 @@ async def list_connection_pools( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_connection_pools_request( + _request = build_databases_get_autoscale_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -114365,28 +122012,20 @@ async def list_connection_pools( return cast(JSON, deserialized) # type: ignore @overload - async def add_connection_pool( + async def update_autoscale( self, database_cluster_uuid: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Add a New Connection Pool (PostgreSQL). - - For PostgreSQL database clusters, connection pools can be used to allow a - database to share its idle connections. The popular PostgreSQL connection - pooling utility PgBouncer is used to provide this service. `See here for more information - `_ - about how and why to use PgBouncer connection pooling including - details about the available transaction modes. + """Configure Autoscale Settings for a Database Cluster. 
- To add a new connection pool to a PostgreSQL database cluster, send a POST - request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool, - the user to connect with, the database to connect to, as well as its desired - size and transaction mode. + To configure autoscale settings for an existing database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -114395,8 +122034,8 @@ async def add_connection_pool( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -114404,191 +122043,18 @@ async def add_connection_pool( # JSON input template you can fill out and use as your body input. body = { - "db": "str", # The database for use with the connection pool. Required. - "mode": "str", # The PGBouncer transaction mode for the connection pool. The - allowed values are session, transaction, and statement. Required. - "name": "str", # A unique name for the connection pool. Must be between 3 - and 60 characters. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The maximum - allowed size is determined by the size of the cluster's primary node. 25 backend - server connections are allowed for every 1GB of RAM. Three are reserved for - maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of - 22 backend server connections while one with 4 GB would allow for 97. Note that - these are shared across all connection pools in a cluster. Required. 
- "connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "user": "str" # Optional. The name of the user for use with the connection - pool. When excluded, all sessions connect to the database as the inbound user. - } - - # response body for status code(s): 201 - response == { - "pool": { - "db": "str", # The database for use with the connection pool. - Required. - "mode": "str", # The PGBouncer transaction mode for the connection - pool. The allowed values are session, transaction, and statement. Required. - "name": "str", # A unique name for the connection pool. Must be - between 3 and 60 characters. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The - maximum allowed size is determined by the size of the cluster's primary node. - 25 backend server connections are allowed for every 1GB of RAM. Three are - reserved for maintenance. For example, a primary node with 1 GB of RAM allows - for a maximum of 22 backend server connections while one with 4 GB would - allow for 97. Note that these are shared across all connection pools in a + "storage": { + "enabled": bool, # Whether storage autoscaling is enabled for the cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "user": "str" # Optional. The name of the user for use with the - connection pool. When excluded, all sessions connect to the database as the - inbound user. + "increment_gib": 0, # Optional. The amount of additional storage to + add (in GiB) when autoscaling is triggered. + "threshold_percent": 0 # Optional. The storage usage threshold + percentage that triggers autoscaling. When storage usage exceeds this + percentage, additional storage will be added automatically. } } - # response body for status code(s): 404 + + # response body for status code(s): 404, 422 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -114602,28 +122068,20 @@ async def add_connection_pool( """ @overload - async def add_connection_pool( + async def update_autoscale( self, database_cluster_uuid: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Add a New Connection Pool (PostgreSQL). - - For PostgreSQL database clusters, connection pools can be used to allow a - database to share its idle connections. The popular PostgreSQL connection - pooling utility PgBouncer is used to provide this service. `See here for more information - `_ - about how and why to use PgBouncer connection pooling including - details about the available transaction modes. + """Configure Autoscale Settings for a Database Cluster. - To add a new connection pool to a PostgreSQL database cluster, send a POST - request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool, - the user to connect with, the database to connect to, as well as its desired - size and transaction mode. 
+ To configure autoscale settings for an existing database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -114632,111 +122090,14 @@ async def add_connection_pool( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 201 - response == { - "pool": { - "db": "str", # The database for use with the connection pool. - Required. - "mode": "str", # The PGBouncer transaction mode for the connection - pool. The allowed values are session, transaction, and statement. Required. - "name": "str", # A unique name for the connection pool. Must be - between 3 and 60 characters. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The - maximum allowed size is determined by the size of the cluster's primary node. - 25 backend server connections are allowed for every 1GB of RAM. Three are - reserved for maintenance. For example, a primary node with 1 GB of RAM allows - for a maximum of 22 backend server connections while one with 4 GB would - allow for 97. Note that these are shared across all connection pools in a - cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "user": "str" # Optional. The name of the user for use with the - connection pool. When excluded, all sessions connect to the database as the - inbound user. - } - } - # response body for status code(s): 404 + # response body for status code(s): 404, 422 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -114750,222 +122111,41 @@ async def add_connection_pool( """ @distributed_trace_async - async def add_connection_pool( + async def update_autoscale( self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Add a New Connection Pool (PostgreSQL). - - For PostgreSQL database clusters, connection pools can be used to allow a - database to share its idle connections. The popular PostgreSQL connection - pooling utility PgBouncer is used to provide this service. `See here for more information - `_ - about how and why to use PgBouncer connection pooling including - details about the available transaction modes. + """Configure Autoscale Settings for a Database Cluster. - To add a new connection pool to a PostgreSQL database cluster, send a POST - request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool, - the user to connect with, the database to connect to, as well as its desired - size and transaction mode. + To configure autoscale settings for an existing database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str :param body: Is either a JSON type or a IO[bytes] type. Required. 
:type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "db": "str", # The database for use with the connection pool. Required. - "mode": "str", # The PGBouncer transaction mode for the connection pool. The - allowed values are session, transaction, and statement. Required. - "name": "str", # A unique name for the connection pool. Must be between 3 - and 60 characters. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The maximum - allowed size is determined by the size of the cluster's primary node. 25 backend - server connections are allowed for every 1GB of RAM. Three are reserved for - maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of - 22 backend server connections while one with 4 GB would allow for 97. Note that - these are shared across all connection pools in a cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "user": "str" # Optional. The name of the user for use with the connection - pool. When excluded, all sessions connect to the database as the inbound user. - } + Example: + .. code-block:: python - # response body for status code(s): 201 - response == { - "pool": { - "db": "str", # The database for use with the connection pool. - Required. - "mode": "str", # The PGBouncer transaction mode for the connection - pool. The allowed values are session, transaction, and statement. Required. - "name": "str", # A unique name for the connection pool. Must be - between 3 and 60 characters. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The - maximum allowed size is determined by the size of the cluster's primary node. - 25 backend server connections are allowed for every 1GB of RAM. Three are - reserved for maintenance. For example, a primary node with 1 GB of RAM allows - for a maximum of 22 backend server connections while one with 4 GB would - allow for 97. Note that these are shared across all connection pools in a + # JSON input template you can fill out and use as your body input. + body = { + "storage": { + "enabled": bool, # Whether storage autoscaling is enabled for the cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "user": "str" # Optional. The name of the user for use with the - connection pool. When excluded, all sessions connect to the database as the - inbound user. + "increment_gib": 0, # Optional. The amount of additional storage to + add (in GiB) when autoscaling is triggered. + "threshold_percent": 0 # Optional. The storage usage threshold + percentage that triggers autoscaling. When storage usage exceeds this + percentage, additional storage will be added automatically. } } - # response body for status code(s): 404 + + # response body for status code(s): 404, 422 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -114996,7 +122176,7 @@ async def add_connection_pool( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -115006,7 +122186,7 @@ async def add_connection_pool( else: _json = body - _request = build_databases_add_connection_pool_request( + _request = build_databases_update_autoscale_request( database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, @@ -115025,14 +122205,26 @@ async def add_connection_pool( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [204, 404, 422]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 201: + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", 
response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -115048,7 +122240,7 @@ async def add_connection_pool( else: deserialized = None - if response.status_code == 404: + if response.status_code == 422: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -115065,25 +122257,24 @@ async def add_connection_pool( deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @distributed_trace_async - async def get_connection_pool( - self, database_cluster_uuid: str, pool_name: str, **kwargs: Any + async def list_kafka_topics( + self, database_cluster_uuid: str, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Retrieve Existing Connection Pool (PostgreSQL). + """List Topics for a Kafka Cluster. - To show information about an existing connection pool for a PostgreSQL database cluster, send a - GET request to ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. - The response will be a JSON object with a ``pool`` key. + To list all of a Kafka cluster's topics, send a GET request to + ``/v2/databases/$DATABASE_ID/topics``. + + The result will be a JSON object with a ``topics`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param pool_name: The name used to identify the connection pool. Required. 
- :type pool_name: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -115093,100 +122284,17 @@ async def get_connection_pool( # response body for status code(s): 200 response == { - "pool": { - "db": "str", # The database for use with the connection pool. - Required. - "mode": "str", # The PGBouncer transaction mode for the connection - pool. The allowed values are session, transaction, and statement. Required. - "name": "str", # A unique name for the connection pool. Must be - between 3 and 60 characters. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The - maximum allowed size is determined by the size of the cluster's primary node. - 25 backend server connections are allowed for every 1GB of RAM. Three are - reserved for maintenance. For example, a primary node with 1 GB of RAM allows - for a maximum of 22 backend server connections while one with 4 GB would - allow for 97. Note that these are shared across all connection pools in a - cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "user": "str" # Optional. The name of the user for use with the - connection pool. When excluded, all sessions connect to the database as the - inbound user. - } + "topics": [ + { + "name": "str", # Optional. The name of the Kafka topic. + "partition_count": 0, # Optional. The number of partitions + available for the topic. On update, this value can only be increased. + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. + Known values are: "active", "configuring", "deleting", and "unknown". + } + ] } # response body for status code(s): 404 response == { @@ -115218,9 +122326,8 @@ async def get_connection_pool( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_connection_pool_request( + _request = build_databases_list_kafka_topics_request( database_cluster_uuid=database_cluster_uuid, - pool_name=pool_name, headers=_headers, params=_params, ) @@ -115280,32 +122387,31 @@ async def get_connection_pool( return cast(JSON, deserialized) # type: ignore @overload - async def update_connection_pool( + async def create_kafka_topic( self, database_cluster_uuid: str, - pool_name: str, - body: JSON, + body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Update Connection Pools (PostgreSQL). + """Create Topic for a Kafka Cluster. - To update a connection pool for a PostgreSQL database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. + To create a topic attached to a Kafka cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/topics``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
:type database_cluster_uuid: str - :param pool_name: The name used to identify the connection pool. Required. - :type pool_name: str - :param body: Required. + :param body: Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -115313,19 +122419,223 @@ async def update_connection_pool( # JSON input template you can fill out and use as your body input. body = { - "db": "str", # The database for use with the connection pool. Required. - "mode": "str", # The PGBouncer transaction mode for the connection pool. The - allowed values are session, transaction, and statement. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The maximum - allowed size is determined by the size of the cluster's primary node. 25 backend - server connections are allowed for every 1GB of RAM. Three are reserved for - maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of - 22 backend server connections while one with 4 GB would allow for 97. Note that - these are shared across all connection pools in a cluster. Required. - "user": "str" # Optional. The name of the user for use with the connection - pool. When excluded, all sessions connect to the database as the inbound user. + "config": { + "cleanup_policy": "delete", # Optional. Default value is "delete". + The cleanup_policy sets the retention policy to use on log segments. 'delete' + will discard old segments when retention time/size limits are reached. + 'compact' will enable log compaction, resulting in retention of the latest + value for each key. Known values are: "delete", "compact", and + "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". 
The compression_type specifies the compression type of the topic. + Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value is + 86400000. The delete_retention_ms specifies how long (in ms) to retain delete + tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is 60000. + The file_delete_delay_ms specifies the time (in ms) to wait before deleting a + file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_messages specifies the number of messages to + accumulate on a log partition before messages are flushed to disk. + "flush_ms": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a + message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is 4096. The + index_interval_bytes specifies the number of bytes between entries being + added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum + amount of time (in ms) that a message will remain uncompacted. This is only + applicable if the logs are have compaction enabled. + "max_message_bytes": 1048588, # Optional. Default value is 1048588. + The max_messages_bytes specifies the largest record batch size (in bytes) + that can be sent to the server. This is calculated after compression if + compression is enabled. + "message_down_conversion_enable": True, # Optional. Default value is + True. The message_down_conversion_enable specifies whether down-conversion of + message formats is enabled to satisfy consumer requests. When 'false', the + broker will not perform conversion for consumers expecting older message + formats. 
The broker will respond with an ``UNSUPPORTED_VERSION`` error for + consume requests from these older clients. + "message_format_version": "3.0-IV1", # Optional. Default value is + "3.0-IV1". The message_format_version specifies the message format version + used by the broker to append messages to the logs. The value of this setting + is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By + setting a particular message format version, all existing messages on disk + must be smaller or equal to the specified version. Known values are: "0.8.0", + "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", + "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", + "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", + "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", + "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", + "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default value + is "create_time". The message_timestamp_type specifies whether to use the + message create time or log append time as the timestamp on a message. Known + values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. + The min_cleanable_dirty_ratio specifies the frequency of log compaction (if + enabled) in relation to duplicates present in the logs. For example, at 0.5, + at most 50% of the log could be duplicates before compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. The + min_compaction_lag_ms specifies the minimum time (in ms) that a message will + remain uncompacted in the log. Only relevant if log compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. 
The + min_insync_replicas specifies the number of replicas that must ACK a write + for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. The + preallocate specifies whether a file should be preallocated on disk when + creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is 604800000. + The retention_ms specifies the maximum amount of time (in ms) to keep a + message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is 209715200. + The segment_bytes specifies the maximum size of a single log file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is 604800000. The + segment_ms specifies the period of time after which the log will be forced to + roll if the segment file isn't full. This ensures that retention can delete + or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partition_count": 0, # Optional. The number of partitions available for the + topic. On update, this value can only be increased. + "replication_factor": 0 # Optional. The number of nodes to replicate data + across the cluster. } + # response body for status code(s): 201 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. 
Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. 
When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. 
Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. 
The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -115340,37 +122650,160 @@ async def update_connection_pool( """ @overload - async def update_connection_pool( + async def create_kafka_topic( self, database_cluster_uuid: str, - pool_name: str, - body: IO[bytes], + body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Update Connection Pools (PostgreSQL). + """Create Topic for a Kafka Cluster. - To update a connection pool for a PostgreSQL database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. + To create a topic attached to a Kafka cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/topics``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param pool_name: The name used to identify the connection pool. Required. - :type pool_name: str - :param body: Required. + :param body: Default value is None. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 201 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 
'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. 
+ "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. 
Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. 
The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -115385,27 +122818,26 @@ async def update_connection_pool( """ @distributed_trace_async - async def update_connection_pool( + async def create_kafka_topic( self, database_cluster_uuid: str, - pool_name: str, - body: Union[JSON, IO[bytes]], + body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Update Connection Pools (PostgreSQL). + """Create Topic for a Kafka Cluster. - To update a connection pool for a PostgreSQL database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. + To create a topic attached to a Kafka cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/topics``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param pool_name: The name used to identify the connection pool. Required. - :type pool_name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. :type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -115413,19 +122845,223 @@ async def update_connection_pool( # JSON input template you can fill out and use as your body input. 
body = { - "db": "str", # The database for use with the connection pool. Required. - "mode": "str", # The PGBouncer transaction mode for the connection pool. The - allowed values are session, transaction, and statement. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The maximum - allowed size is determined by the size of the cluster's primary node. 25 backend - server connections are allowed for every 1GB of RAM. Three are reserved for - maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of - 22 backend server connections while one with 4 GB would allow for 97. Note that - these are shared across all connection pools in a cluster. Required. - "user": "str" # Optional. The name of the user for use with the connection - pool. When excluded, all sessions connect to the database as the inbound user. + "config": { + "cleanup_policy": "delete", # Optional. Default value is "delete". + The cleanup_policy sets the retention policy to use on log segments. 'delete' + will discard old segments when retention time/size limits are reached. + 'compact' will enable log compaction, resulting in retention of the latest + value for each key. Known values are: "delete", "compact", and + "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the topic. + Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value is + 86400000. The delete_retention_ms specifies how long (in ms) to retain delete + tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is 60000. + The file_delete_delay_ms specifies the time (in ms) to wait before deleting a + file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default value is + 9223372036854776000. 
The flush_messages specifies the number of messages to + accumulate on a log partition before messages are flushed to disk. + "flush_ms": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a + message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is 4096. The + index_interval_bytes specifies the number of bytes between entries being + added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum + amount of time (in ms) that a message will remain uncompacted. This is only + applicable if the logs are have compaction enabled. + "max_message_bytes": 1048588, # Optional. Default value is 1048588. + The max_messages_bytes specifies the largest record batch size (in bytes) + that can be sent to the server. This is calculated after compression if + compression is enabled. + "message_down_conversion_enable": True, # Optional. Default value is + True. The message_down_conversion_enable specifies whether down-conversion of + message formats is enabled to satisfy consumer requests. When 'false', the + broker will not perform conversion for consumers expecting older message + formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for + consume requests from these older clients. + "message_format_version": "3.0-IV1", # Optional. Default value is + "3.0-IV1". The message_format_version specifies the message format version + used by the broker to append messages to the logs. The value of this setting + is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By + setting a particular message format version, all existing messages on disk + must be smaller or equal to the specified version. 
Known values are: "0.8.0", + "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", + "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", + "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", + "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", + "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", + "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default value + is "create_time". The message_timestamp_type specifies whether to use the + message create time or log append time as the timestamp on a message. Known + values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. + The min_cleanable_dirty_ratio specifies the frequency of log compaction (if + enabled) in relation to duplicates present in the logs. For example, at 0.5, + at most 50% of the log could be duplicates before compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. The + min_compaction_lag_ms specifies the minimum time (in ms) that a message will + remain uncompacted in the log. Only relevant if log compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. The + min_insync_replicas specifies the number of replicas that must ACK a write + for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. The + preallocate specifies whether a file should be preallocated on disk when + creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is 604800000. 
+ The retention_ms specifies the maximum amount of time (in ms) to keep a + message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is 209715200. + The segment_bytes specifies the maximum size of a single log file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is 604800000. The + segment_ms specifies the period of time after which the log will be forced to + roll if the segment file isn't full. This ensures that retention can delete + or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partition_count": 0, # Optional. The number of partitions available for the + topic. On update, this value can only be increased. + "replication_factor": 0 # Optional. The number of nodes to replicate data + across the cluster. } + # response body for status code(s): 201 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. 
The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. 
By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. 
+ "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". 
+ } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -115457,7 +123093,7 @@ async def update_connection_pool( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -115465,125 +123101,16 @@ async def update_connection_pool( if isinstance(body, (IOBase, bytes)): _content = body else: - _json = body + if body is not None: + _json = body + else: + _json = None - _request = build_databases_update_connection_pool_request( + _request = build_databases_create_kafka_topic_request( database_cluster_uuid=database_cluster_uuid, - pool_name=pool_name, content_type=content_type, json=_json, - content=_content, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - if _stream: - await response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - deserialized = None - response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: - response_headers["ratelimit-limit"] 
= self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete_connection_pool( - self, database_cluster_uuid: str, pool_name: str, **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Delete a Connection Pool (PostgreSQL). - - To delete a specific connection pool for a PostgreSQL database cluster, send - a DELETE request to ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. - - A status of 204 will be given. This indicates that the request was processed - successfully, but that no response body is needed. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param pool_name: The name used to identify the connection pool. Required. - :type pool_name: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - - _request = build_databases_delete_connection_pool_request( - database_cluster_uuid=database_cluster_uuid, - pool_name=pool_name, + content=_content, headers=_headers, params=_params, ) @@ -115598,15 +123125,14 @@ async def delete_connection_pool( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [201, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -115617,6 +123143,11 @@ async def delete_connection_pool( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -115634,24 +123165,26 @@ async def delete_connection_pool( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return 
cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_eviction_policy( - self, database_cluster_uuid: str, **kwargs: Any + async def get_kafka_topic( + self, database_cluster_uuid: str, topic_name: str, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Retrieve the Eviction Policy for a Caching or Valkey Cluster. + """Get Topic for a Kafka Cluster. - To retrieve the configured eviction policy for an existing Caching or Valkey cluster, send a - GET request to ``/v2/databases/$DATABASE_ID/eviction_policy``. - The response will be a JSON object with an ``eviction_policy`` key. This will be set to a - string representing the eviction policy. + To retrieve a given topic by name from the set of a Kafka cluster's topics, + send a GET request to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -115661,16 +123194,127 @@ async def get_eviction_policy( # response body for status code(s): 200 response == { - "eviction_policy": "str" # A string specifying the desired eviction policy - for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data, - returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key, - least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random - order. * ``volatile_lru``"" : Evict keys with expiration only, least recently - used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a - random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest - time-to-live (TTL) first. Required. 
Known values are: "noeviction", - "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and - "volatile_ttl". + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. 
+ "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. 
The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. 
The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } } # response body for status code(s): 404 response == { @@ -115702,8 +123346,9 @@ async def get_eviction_policy( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_eviction_policy_request( + _request = build_databases_get_kafka_topic_request( database_cluster_uuid=database_cluster_uuid, + topic_name=topic_name, headers=_headers, params=_params, ) @@ -115763,29 +123408,34 @@ async def get_eviction_policy( return cast(JSON, deserialized) # type: ignore @overload - async def update_eviction_policy( + async def update_kafka_topic( self, database_cluster_uuid: str, - body: JSON, + topic_name: str, + body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Configure the Eviction Policy for a Caching or Valkey Cluster. + """Update Topic for a Kafka Cluster. - To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request - to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. + To update a topic attached to a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
:type database_cluster_uuid: str - :param body: Required. + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str + :param body: Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -115793,18 +123443,222 @@ async def update_eviction_policy( # JSON input template you can fill out and use as your body input. body = { - "eviction_policy": "str" # A string specifying the desired eviction policy - for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data, - returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key, - least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random - order. * ``volatile_lru``"" : Evict keys with expiration only, least recently - used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a - random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest - time-to-live (TTL) first. Required. Known values are: "noeviction", - "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and - "volatile_ttl". + "config": { + "cleanup_policy": "delete", # Optional. Default value is "delete". + The cleanup_policy sets the retention policy to use on log segments. 'delete' + will discard old segments when retention time/size limits are reached. + 'compact' will enable log compaction, resulting in retention of the latest + value for each key. Known values are: "delete", "compact", and + "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the topic. 
+ Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value is + 86400000. The delete_retention_ms specifies how long (in ms) to retain delete + tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is 60000. + The file_delete_delay_ms specifies the time (in ms) to wait before deleting a + file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_messages specifies the number of messages to + accumulate on a log partition before messages are flushed to disk. + "flush_ms": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a + message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is 4096. The + index_interval_bytes specifies the number of bytes between entries being + added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum + amount of time (in ms) that a message will remain uncompacted. This is only + applicable if the logs are have compaction enabled. + "max_message_bytes": 1048588, # Optional. Default value is 1048588. + The max_messages_bytes specifies the largest record batch size (in bytes) + that can be sent to the server. This is calculated after compression if + compression is enabled. + "message_down_conversion_enable": True, # Optional. Default value is + True. The message_down_conversion_enable specifies whether down-conversion of + message formats is enabled to satisfy consumer requests. When 'false', the + broker will not perform conversion for consumers expecting older message + formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for + consume requests from these older clients. 
+ "message_format_version": "3.0-IV1", # Optional. Default value is + "3.0-IV1". The message_format_version specifies the message format version + used by the broker to append messages to the logs. The value of this setting + is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By + setting a particular message format version, all existing messages on disk + must be smaller or equal to the specified version. Known values are: "0.8.0", + "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", + "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", + "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", + "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", + "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", + "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default value + is "create_time". The message_timestamp_type specifies whether to use the + message create time or log append time as the timestamp on a message. Known + values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. + The min_cleanable_dirty_ratio specifies the frequency of log compaction (if + enabled) in relation to duplicates present in the logs. For example, at 0.5, + at most 50% of the log could be duplicates before compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. The + min_compaction_lag_ms specifies the minimum time (in ms) that a message will + remain uncompacted in the log. Only relevant if log compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. The + min_insync_replicas specifies the number of replicas that must ACK a write + for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. 
The + preallocate specifies whether a file should be preallocated on disk when + creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is 604800000. + The retention_ms specifies the maximum amount of time (in ms) to keep a + message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is 209715200. + The segment_bytes specifies the maximum size of a single log file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is 604800000. The + segment_ms specifies the period of time after which the log will be forced to + roll if the segment file isn't full. This ensures that retention can delete + or compact old data. + }, + "partition_count": 0, # Optional. The number of partitions available for the + topic. On update, this value can only be increased. + "replication_factor": 0 # Optional. The number of nodes to replicate data + across the cluster. } + # response body for status code(s): 200 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. 
Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. 
+ "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. 
+ The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". 
+ } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -115819,34 +123673,163 @@ async def update_eviction_policy( """ @overload - async def update_eviction_policy( + async def update_kafka_topic( self, database_cluster_uuid: str, - body: IO[bytes], + topic_name: str, + body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Configure the Eviction Policy for a Caching or Valkey Cluster. + """Update Topic for a Kafka Cluster. - To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request - to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. + To update a topic attached to a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str + :param body: Default value is None. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 200 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 
'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. 
The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. 
+ The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. 
The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -115861,21 +123844,29 @@ async def update_eviction_policy( """ @distributed_trace_async - async def update_eviction_policy( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: + async def update_kafka_topic( + self, + database_cluster_uuid: str, + topic_name: str, + body: Optional[Union[JSON, IO[bytes]]] = None, + **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Configure the Eviction Policy for a Caching or Valkey Cluster. + """Update Topic for a Kafka Cluster. - To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request - to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. + To update a topic attached to a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. 
:type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -115883,18 +123874,222 @@ async def update_eviction_policy( # JSON input template you can fill out and use as your body input. body = { - "eviction_policy": "str" # A string specifying the desired eviction policy - for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data, - returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key, - least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random - order. * ``volatile_lru``"" : Evict keys with expiration only, least recently - used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a - random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest - time-to-live (TTL) first. Required. Known values are: "noeviction", - "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and - "volatile_ttl". + "config": { + "cleanup_policy": "delete", # Optional. Default value is "delete". + The cleanup_policy sets the retention policy to use on log segments. 'delete' + will discard old segments when retention time/size limits are reached. + 'compact' will enable log compaction, resulting in retention of the latest + value for each key. Known values are: "delete", "compact", and + "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the topic. + Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value is + 86400000. The delete_retention_ms specifies how long (in ms) to retain delete + tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is 60000. 
+ The file_delete_delay_ms specifies the time (in ms) to wait before deleting a + file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_messages specifies the number of messages to + accumulate on a log partition before messages are flushed to disk. + "flush_ms": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a + message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is 4096. The + index_interval_bytes specifies the number of bytes between entries being + added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum + amount of time (in ms) that a message will remain uncompacted. This is only + applicable if the logs are have compaction enabled. + "max_message_bytes": 1048588, # Optional. Default value is 1048588. + The max_messages_bytes specifies the largest record batch size (in bytes) + that can be sent to the server. This is calculated after compression if + compression is enabled. + "message_down_conversion_enable": True, # Optional. Default value is + True. The message_down_conversion_enable specifies whether down-conversion of + message formats is enabled to satisfy consumer requests. When 'false', the + broker will not perform conversion for consumers expecting older message + formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for + consume requests from these older clients. + "message_format_version": "3.0-IV1", # Optional. Default value is + "3.0-IV1". The message_format_version specifies the message format version + used by the broker to append messages to the logs. The value of this setting + is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. 
By + setting a particular message format version, all existing messages on disk + must be smaller or equal to the specified version. Known values are: "0.8.0", + "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", + "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", + "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", + "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", + "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", + "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default value + is "create_time". The message_timestamp_type specifies whether to use the + message create time or log append time as the timestamp on a message. Known + values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. + The min_cleanable_dirty_ratio specifies the frequency of log compaction (if + enabled) in relation to duplicates present in the logs. For example, at 0.5, + at most 50% of the log could be duplicates before compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. The + min_compaction_lag_ms specifies the minimum time (in ms) that a message will + remain uncompacted in the log. Only relevant if log compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. The + min_insync_replicas specifies the number of replicas that must ACK a write + for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. The + preallocate specifies whether a file should be preallocated on disk when + creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. 
+ "retention_ms": 604800000, # Optional. Default value is 604800000. + The retention_ms specifies the maximum amount of time (in ms) to keep a + message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is 209715200. + The segment_bytes specifies the maximum size of a single log file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is 604800000. The + segment_ms specifies the period of time after which the log will be forced to + roll if the segment file isn't full. This ensures that retention can delete + or compact old data. + }, + "partition_count": 0, # Optional. The number of partitions available for the + topic. On update, this value can only be increased. + "replication_factor": 0 # Optional. The number of nodes to replicate data + across the cluster. } + # response body for status code(s): 200 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. 
The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. 
By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. 
+ "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". 
+ } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -115926,7 +124121,7 @@ async def update_eviction_policy( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -115934,10 +124129,14 @@ async def update_eviction_policy( if isinstance(body, (IOBase, bytes)): _content = body else: - _json = body + if body is not None: + _json = body + else: + _json = None - _request = build_databases_update_eviction_policy_request( + _request = build_databases_update_kafka_topic_request( database_cluster_uuid=database_cluster_uuid, + topic_name=topic_name, content_type=content_type, json=_json, content=_content, @@ -115955,15 +124154,14 @@ async def update_eviction_policy( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -115974,6 +124172,11 @@ async def update_eviction_policy( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -115991,34 +124194,34 @@ async def update_eviction_policy( deserialized = None if cls: - return cls(pipeline_response, 
deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + async def delete_kafka_topic( + self, database_cluster_uuid: str, topic_name: str, **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Retrieve the SQL Modes for a MySQL Cluster. + """Delete Topic for a Kafka Cluster. - To retrieve the configured SQL modes for an existing MySQL cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/sql_mode``. - The response will be a JSON object with a ``sql_mode`` key. This will be set to a string - representing the configured SQL modes. + To delete a single topic within a Kafka cluster, send a DELETE request + to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + A status of 204 will be given. This indicates that the request was + processed successfully, but that no response body is needed. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :return: JSON object - :rtype: JSON + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "sql_mode": "str" # A string specifying the configured SQL modes for the - MySQL cluster. Required. 
- } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -116047,10 +124250,11 @@ async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_get_sql_mode_request( + _request = build_databases_delete_kafka_topic_request( database_cluster_uuid=database_cluster_uuid, + topic_name=topic_name, headers=_headers, params=_params, ) @@ -116065,14 +124269,15 @@ async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -116083,11 +124288,6 @@ async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -116105,136 +124305,40 @@ async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @overload 
- async def update_sql_mode( - self, - database_cluster_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Update SQL Mode for a Cluster. - - To configure the SQL modes for an existing MySQL cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 - documentation for a `full list of supported SQL modes - `_. - A successful request will receive a 204 No Content status code with no body in response. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "sql_mode": "str" # A string specifying the configured SQL modes for the - MySQL cluster. Required. - } - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ - - @overload - async def update_sql_mode( - self, - database_cluster_uuid: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Update SQL Mode for a Cluster. - - To configure the SQL modes for an existing MySQL cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 - documentation for a `full list of supported SQL modes - `_. - A successful request will receive a 204 No Content status code with no body in response. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python + return cls(pipeline_response, deserialized, response_headers) # type: ignore - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ + return deserialized # type: ignore @distributed_trace_async - async def update_sql_mode( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: + async def list_logsink(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Update SQL Mode for a Cluster. + """List Logsinks for a Database Cluster. - To configure the SQL modes for an existing MySQL cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 - documentation for a `full list of supported SQL modes - `_. - A successful request will receive a 204 No Content status code with no body in response. + To list logsinks for a database cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/logsink``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "sql_mode": "str" # A string specifying the configured SQL modes for the - MySQL cluster. Required. + # response body for status code(s): 200 + response == { + "sinks": [ + { + "config": {}, + "sink_id": "str", # Optional. A unique identifier for + Logsink. + "sink_name": "str", # Optional. The name of the Logsink. + "sink_type": "str" # Optional. Known values are: "rsyslog", + "elasticsearch", and "opensearch". 
+ } + ] } - # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -116260,27 +124364,13 @@ async def update_sql_mode( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_update_sql_mode_request( + _request = build_databases_list_logsink_request( database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, headers=_headers, params=_params, ) @@ -116295,15 +124385,14 @@ async def update_sql_mode( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -116314,6 +124403,11 @@ async def update_sql_mode( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -116331,25 
+124425,24 @@ async def update_sql_mode( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @overload - async def update_major_version( + async def create_logsink( self, database_cluster_uuid: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Upgrade Major Version for a Database. + """Create Logsink for a Database Cluster. - To upgrade the major version of a database, send a PUT request to - ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version. - A successful request will receive a 204 No Content status code with no body in response. + To create logsink for a database cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/logsink``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -116358,8 +124451,8 @@ async def update_major_version( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -116367,10 +124460,26 @@ async def update_major_version( # JSON input template you can fill out and use as your body input. body = { - "version": "str" # Optional. A string representing the version of the - database engine in use for the cluster. + "config": {}, + "sink_name": "str", # Optional. The name of the Logsink. + "sink_type": "str" # Optional. Type of logsink integration. * Use + ``datadog`` for Datadog integration **only with MongoDB clusters**. 
* For + non-MongoDB clusters, use ``rsyslog`` for general syslog forwarding. * Other + supported types include ``elasticsearch`` and ``opensearch``. More details about + the configuration can be found in the ``config`` property. Known values are: + "rsyslog", "elasticsearch", "opensearch", and "datadog". } + # response body for status code(s): 201 + response == { + "sink": { + "config": {}, + "sink_id": "str", # Optional. A unique identifier for Logsink. + "sink_name": "str", # Optional. The name of the Logsink. + "sink_type": "str" # Optional. Known values are: "rsyslog", + "elasticsearch", and "opensearch". + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -116385,20 +124494,19 @@ async def update_major_version( """ @overload - async def update_major_version( + async def create_logsink( self, database_cluster_uuid: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Upgrade Major Version for a Database. + """Create Logsink for a Database Cluster. - To upgrade the major version of a database, send a PUT request to - ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version. - A successful request will receive a 204 No Content status code with no body in response. + To create logsink for a database cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/logsink``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -116407,13 +124515,23 @@ async def update_major_version( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. 
code-block:: python + # response body for status code(s): 201 + response == { + "sink": { + "config": {}, + "sink_id": "str", # Optional. A unique identifier for Logsink. + "sink_name": "str", # Optional. The name of the Logsink. + "sink_type": "str" # Optional. Known values are: "rsyslog", + "elasticsearch", and "opensearch". + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -116428,22 +124546,21 @@ async def update_major_version( """ @distributed_trace_async - async def update_major_version( + async def create_logsink( self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Upgrade Major Version for a Database. + """Create Logsink for a Database Cluster. - To upgrade the major version of a database, send a PUT request to - ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version. - A successful request will receive a 204 No Content status code with no body in response. + To create logsink for a database cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/logsink``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -116451,10 +124568,26 @@ async def update_major_version( # JSON input template you can fill out and use as your body input. body = { - "version": "str" # Optional. A string representing the version of the - database engine in use for the cluster. + "config": {}, + "sink_name": "str", # Optional. The name of the Logsink. + "sink_type": "str" # Optional. Type of logsink integration. 
* Use + ``datadog`` for Datadog integration **only with MongoDB clusters**. * For + non-MongoDB clusters, use ``rsyslog`` for general syslog forwarding. * Other + supported types include ``elasticsearch`` and ``opensearch``. More details about + the configuration can be found in the ``config`` property. Known values are: + "rsyslog", "elasticsearch", "opensearch", and "datadog". } + # response body for status code(s): 201 + response == { + "sink": { + "config": {}, + "sink_id": "str", # Optional. A unique identifier for Logsink. + "sink_name": "str", # Optional. The name of the Logsink. + "sink_type": "str" # Optional. Known values are: "rsyslog", + "elasticsearch", and "opensearch". + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -116486,7 +124619,7 @@ async def update_major_version( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -116496,7 +124629,7 @@ async def update_major_version( else: _json = body - _request = build_databases_update_major_version_request( + _request = build_databases_create_logsink_request( database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, @@ -116515,15 +124648,14 @@ async def update_major_version( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [201, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 201: response_headers["ratelimit-limit"] = 
self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -116534,6 +124666,11 @@ async def update_major_version( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -116551,21 +124688,24 @@ async def update_major_version( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_autoscale(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + async def get_logsink( + self, database_cluster_uuid: str, logsink_id: str, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Retrieve Autoscale Configuration for a Database Cluster. + """Get Logsink for a Database Cluster. - To retrieve the autoscale configuration for an existing database cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/autoscale``. - The response will be a JSON object with autoscaling configuration details. + To get a logsink for a database cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param logsink_id: A unique identifier for a logsink of a database cluster. Required. 
+ :type logsink_id: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -116575,17 +124715,11 @@ async def get_autoscale(self, database_cluster_uuid: str, **kwargs: Any) -> JSON # response body for status code(s): 200 response == { - "autoscale": { - "storage": { - "enabled": bool, # Whether storage autoscaling is enabled - for the cluster. Required. - "increment_gib": 0, # Optional. The amount of additional - storage to add (in GiB) when autoscaling is triggered. - "threshold_percent": 0 # Optional. The storage usage - threshold percentage that triggers autoscaling. When storage usage - exceeds this percentage, additional storage will be added automatically. - } - } + "config": {}, + "sink_id": "str", # Optional. A unique identifier for Logsink. + "sink_name": "str", # Optional. The name of the Logsink. + "sink_type": "str" # Optional. Known values are: "rsyslog", "elasticsearch", + and "opensearch". } # response body for status code(s): 404 response == { @@ -116617,8 +124751,9 @@ async def get_autoscale(self, database_cluster_uuid: str, **kwargs: Any) -> JSON cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_autoscale_request( + _request = build_databases_get_logsink_request( database_cluster_uuid=database_cluster_uuid, + logsink_id=logsink_id, headers=_headers, params=_params, ) @@ -116678,23 +124813,25 @@ async def get_autoscale(self, database_cluster_uuid: str, **kwargs: Any) -> JSON return cast(JSON, deserialized) # type: ignore @overload - async def update_autoscale( + async def update_logsink( self, database_cluster_uuid: str, + logsink_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Configure Autoscale Settings for a Database Cluster. + """Update Logsink for a Database Cluster. 
- To configure autoscale settings for an existing database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration. - A successful request will receive a 204 No Content status code with no body in response. + To update a logsink for a database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param logsink_id: A unique identifier for a logsink of a database cluster. Required. + :type logsink_id: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -116709,18 +124846,10 @@ async def update_autoscale( # JSON input template you can fill out and use as your body input. body = { - "storage": { - "enabled": bool, # Whether storage autoscaling is enabled for the - cluster. Required. - "increment_gib": 0, # Optional. The amount of additional storage to - add (in GiB) when autoscaling is triggered. - "threshold_percent": 0 # Optional. The storage usage threshold - percentage that triggers autoscaling. When storage usage exceeds this - percentage, additional storage will be added automatically. - } + "config": {} } - # response body for status code(s): 404, 422 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -116734,23 +124863,25 @@ async def update_autoscale( """ @overload - async def update_autoscale( + async def update_logsink( self, database_cluster_uuid: str, + logsink_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Configure Autoscale Settings for a Database Cluster. + """Update Logsink for a Database Cluster. 
- To configure autoscale settings for an existing database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration. - A successful request will receive a 204 No Content status code with no body in response. + To update a logsink for a database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param logsink_id: A unique identifier for a logsink of a database cluster. Required. + :type logsink_id: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -116763,7 +124894,7 @@ async def update_autoscale( Example: .. code-block:: python - # response body for status code(s): 404, 422 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -116777,18 +124908,23 @@ async def update_autoscale( """ @distributed_trace_async - async def update_autoscale( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + async def update_logsink( + self, + database_cluster_uuid: str, + logsink_id: str, + body: Union[JSON, IO[bytes]], + **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Configure Autoscale Settings for a Database Cluster. + """Update Logsink for a Database Cluster. - To configure autoscale settings for an existing database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration. - A successful request will receive a 204 No Content status code with no body in response. + To update a logsink for a database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. 
:param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param logsink_id: A unique identifier for a logsink of a database cluster. Required. + :type logsink_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object or None @@ -116800,18 +124936,10 @@ async def update_autoscale( # JSON input template you can fill out and use as your body input. body = { - "storage": { - "enabled": bool, # Whether storage autoscaling is enabled for the - cluster. Required. - "increment_gib": 0, # Optional. The amount of additional storage to - add (in GiB) when autoscaling is triggered. - "threshold_percent": 0 # Optional. The storage usage threshold - percentage that triggers autoscaling. When storage usage exceeds this - percentage, additional storage will be added automatically. - } + "config": {} } - # response body for status code(s): 404, 422 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -116852,8 +124980,9 @@ async def update_autoscale( else: _json = body - _request = build_databases_update_autoscale_request( + _request = build_databases_update_logsink_request( database_cluster_uuid=database_cluster_uuid, + logsink_id=logsink_id, content_type=content_type, json=_json, content=_content, @@ -116871,7 +125000,7 @@ async def update_autoscale( response = pipeline_response.http_response - if response.status_code not in [204, 404, 422]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -116879,7 +125008,7 @@ async def update_autoscale( deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -116906,7 +125035,99 @@ async def update_autoscale( else: deserialized = None - if response.status_code == 422: + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_logsink( + self, database_cluster_uuid: str, logsink_id: str, **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Delete Logsink for a Database Cluster. + + To delete a logsink for a database cluster, send a DELETE request to + ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param logsink_id: A unique identifier for a logsink of a database cluster. Required. + :type logsink_id: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_databases_delete_logsink_request( + database_cluster_uuid=database_cluster_uuid, + logsink_id=logsink_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( 
+ "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -116928,16 +125149,14 @@ async def update_autoscale( return deserialized # type: ignore @distributed_trace_async - async def list_kafka_topics( + async def list_kafka_schemas( self, database_cluster_uuid: str, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """List Topics for a Kafka Cluster. - - To list all of a Kafka cluster's topics, send a GET request to - ``/v2/databases/$DATABASE_ID/topics``. + """List Schemas for Kafka Cluster. - The result will be a JSON object with a ``topics`` key. + To list all schemas for a Kafka cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/schema-registry``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -116950,15 +125169,15 @@ async def list_kafka_topics( # response body for status code(s): 200 response == { - "topics": [ + "subjects": [ { - "name": "str", # Optional. The name of the Kafka topic. - "partition_count": 0, # Optional. The number of partitions - available for the topic. On update, this value can only be increased. - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. - Known values are: "active", "configuring", "deleting", and "unknown". + "schema": "str", # Optional. The schema definition in the + specified format. + "schema_id": 0, # Optional. The id for schema. + "schema_type": "str", # Optional. The type of the schema. + Known values are: "AVRO", "JSON", and "PROTOBUF". 
+ "subject_name": "str" # Optional. The name of the schema + subject. } ] } @@ -116992,7 +125211,7 @@ async def list_kafka_topics( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_kafka_topics_request( + _request = build_databases_list_kafka_schemas_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -117053,680 +125272,149 @@ async def list_kafka_topics( return cast(JSON, deserialized) # type: ignore @overload - async def create_kafka_topic( + async def create_kafka_schema( self, database_cluster_uuid: str, - body: Optional[JSON] = None, + body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Topic for a Kafka Cluster. - - To create a topic attached to a Kafka cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/topics``. + """Create Schema Registry for Kafka Cluster. - The result will be a JSON object with a ``topic`` key. + To create a Kafka schema for a database cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/schema-registry``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Default value is None. + :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: JSON object :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "config": { - "cleanup_policy": "delete", # Optional. Default value is "delete". - The cleanup_policy sets the retention policy to use on log segments. 'delete' - will discard old segments when retention time/size limits are reached. 
- 'compact' will enable log compaction, resulting in retention of the latest - value for each key. Known values are: "delete", "compact", and - "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the topic. - Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value is - 86400000. The delete_retention_ms specifies how long (in ms) to retain delete - tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is 60000. - The file_delete_delay_ms specifies the time (in ms) to wait before deleting a - file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_messages specifies the number of messages to - accumulate on a log partition before messages are flushed to disk. - "flush_ms": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a - message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is 4096. The - index_interval_bytes specifies the number of bytes between entries being - added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum - amount of time (in ms) that a message will remain uncompacted. This is only - applicable if the logs are have compaction enabled. - "max_message_bytes": 1048588, # Optional. Default value is 1048588. - The max_messages_bytes specifies the largest record batch size (in bytes) - that can be sent to the server. This is calculated after compression if - compression is enabled. - "message_down_conversion_enable": True, # Optional. Default value is - True. 
The message_down_conversion_enable specifies whether down-conversion of - message formats is enabled to satisfy consumer requests. When 'false', the - broker will not perform conversion for consumers expecting older message - formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for - consume requests from these older clients. - "message_format_version": "3.0-IV1", # Optional. Default value is - "3.0-IV1". The message_format_version specifies the message format version - used by the broker to append messages to the logs. The value of this setting - is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By - setting a particular message format version, all existing messages on disk - must be smaller or equal to the specified version. Known values are: "0.8.0", - "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", - "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", - "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", - "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", - "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", - "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default value - is "create_time". The message_timestamp_type specifies whether to use the - message create time or log append time as the timestamp on a message. Known - values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. - The min_cleanable_dirty_ratio specifies the frequency of log compaction (if - enabled) in relation to duplicates present in the logs. For example, at 0.5, - at most 50% of the log could be duplicates before compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. 
The - min_compaction_lag_ms specifies the minimum time (in ms) that a message will - remain uncompacted in the log. Only relevant if log compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. The - min_insync_replicas specifies the number of replicas that must ACK a write - for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. The - preallocate specifies whether a file should be preallocated on disk when - creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is 604800000. - The retention_ms specifies the maximum amount of time (in ms) to keep a - message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is 209715200. - The segment_bytes specifies the maximum size of a single log file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is 604800000. The - segment_ms specifies the period of time after which the log will be forced to - roll if the segment file isn't full. This ensures that retention can delete - or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partition_count": 0, # Optional. The number of partitions available for the - topic. On update, this value can only be increased. - "replication_factor": 0 # Optional. The number of nodes to replicate data - across the cluster. - } - - # response body for status code(s): 201 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". 
The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. 
This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. 
- For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. 
The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - async def create_kafka_topic( - self, - database_cluster_uuid: str, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Create Topic for a Kafka Cluster. - - To create a topic attached to a Kafka cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/topics``. - - The result will be a JSON object with a ``topic`` key. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". 
- :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 201 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. 
The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". 
The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. 
This ensures - that retention can delete or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace_async - async def create_kafka_topic( - self, - database_cluster_uuid: str, - body: Optional[Union[JSON, IO[bytes]]] = None, - **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Create Topic for a Kafka Cluster. - - To create a topic attached to a Kafka cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/topics``. - - The result will be a JSON object with a ``topic`` key. - - :param database_cluster_uuid: A unique identifier for a database cluster. 
Required. - :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. - :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "config": { - "cleanup_policy": "delete", # Optional. Default value is "delete". - The cleanup_policy sets the retention policy to use on log segments. 'delete' - will discard old segments when retention time/size limits are reached. - 'compact' will enable log compaction, resulting in retention of the latest - value for each key. Known values are: "delete", "compact", and - "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the topic. - Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value is - 86400000. The delete_retention_ms specifies how long (in ms) to retain delete - tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is 60000. - The file_delete_delay_ms specifies the time (in ms) to wait before deleting a - file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_messages specifies the number of messages to - accumulate on a log partition before messages are flushed to disk. - "flush_ms": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a - message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is 4096. The - index_interval_bytes specifies the number of bytes between entries being - added into te offset index. 
- "max_compaction_lag_ms": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum - amount of time (in ms) that a message will remain uncompacted. This is only - applicable if the logs are have compaction enabled. - "max_message_bytes": 1048588, # Optional. Default value is 1048588. - The max_messages_bytes specifies the largest record batch size (in bytes) - that can be sent to the server. This is calculated after compression if - compression is enabled. - "message_down_conversion_enable": True, # Optional. Default value is - True. The message_down_conversion_enable specifies whether down-conversion of - message formats is enabled to satisfy consumer requests. When 'false', the - broker will not perform conversion for consumers expecting older message - formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for - consume requests from these older clients. - "message_format_version": "3.0-IV1", # Optional. Default value is - "3.0-IV1". The message_format_version specifies the message format version - used by the broker to append messages to the logs. The value of this setting - is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By - setting a particular message format version, all existing messages on disk - must be smaller or equal to the specified version. Known values are: "0.8.0", - "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", - "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", - "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", - "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", - "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", - "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default value - is "create_time". 
The message_timestamp_type specifies whether to use the - message create time or log append time as the timestamp on a message. Known - values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. - The min_cleanable_dirty_ratio specifies the frequency of log compaction (if - enabled) in relation to duplicates present in the logs. For example, at 0.5, - at most 50% of the log could be duplicates before compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. The - min_compaction_lag_ms specifies the minimum time (in ms) that a message will - remain uncompacted in the log. Only relevant if log compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. The - min_insync_replicas specifies the number of replicas that must ACK a write - for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. The - preallocate specifies whether a file should be preallocated on disk when - creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is 604800000. - The retention_ms specifies the maximum amount of time (in ms) to keep a - message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is 209715200. - The segment_bytes specifies the maximum size of a single log file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is 604800000. The - segment_ms specifies the period of time after which the log will be forced to - roll if the segment file isn't full. 
This ensures that retention can delete - or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partition_count": 0, # Optional. The number of partitions available for the - topic. On update, this value can only be increased. - "replication_factor": 0 # Optional. The number of nodes to replicate data - across the cluster. - } - - # response body for status code(s): 201 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. 
The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. 
Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. 
The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "schema": "str", # Optional. The schema definition in the specified format. + "schema_type": "str", # Optional. The type of the schema. Known values are: + "AVRO", "JSON", and "PROTOBUF". + "subject_name": "str" # Optional. The name of the schema subject. 
+ } + + # response body for status code(s): 201 + response == { + "schema": "str", # Optional. The schema definition in the specified format. + "schema_id": 0, # Optional. The id for schema. + "schema_type": "str", # Optional. The type of the schema. Known values are: + "AVRO", "JSON", and "PROTOBUF". + "subject_name": "str" # Optional. The name of the schema subject. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def create_kafka_schema( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create Schema Registry for Kafka Cluster. + + To create a Kafka schema for a database cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/schema-registry``. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "schema": "str", # Optional. The schema definition in the specified format. + "schema_id": 0, # Optional. The id for schema. 
+ "schema_type": "str", # Optional. The type of the schema. Known values are: + "AVRO", "JSON", and "PROTOBUF". + "subject_name": "str" # Optional. The name of the schema subject. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def create_kafka_schema( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create Schema Registry for Kafka Cluster. + + To create a Kafka schema for a database cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/schema-registry``. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "schema": "str", # Optional. The schema definition in the specified format. + "schema_type": "str", # Optional. The type of the schema. Known values are: + "AVRO", "JSON", and "PROTOBUF". + "subject_name": "str" # Optional. The name of the schema subject. + } + + # response body for status code(s): 201 + response == { + "schema": "str", # Optional. The schema definition in the specified format. + "schema_id": 0, # Optional. 
The id for schema. + "schema_type": "str", # Optional. The type of the schema. Known values are: + "AVRO", "JSON", and "PROTOBUF". + "subject_name": "str" # Optional. The name of the schema subject. } # response body for status code(s): 404 response == { @@ -117767,12 +125455,9 @@ async def create_kafka_topic( if isinstance(body, (IOBase, bytes)): _content = body else: - if body is not None: - _json = body - else: - _json = None + _json = body - _request = build_databases_create_kafka_topic_request( + _request = build_databases_create_kafka_schema_request( database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, @@ -117836,21 +125521,19 @@ async def create_kafka_topic( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_kafka_topic( - self, database_cluster_uuid: str, topic_name: str, **kwargs: Any + async def get_kafka_schema( + self, database_cluster_uuid: str, subject_name: str, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Get Topic for a Kafka Cluster. - - To retrieve a given topic by name from the set of a Kafka cluster's topics, - send a GET request to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + """Get a Kafka Schema by Subject Name. - The result will be a JSON object with a ``topic`` key. + To get a specific schema by subject name for a Kafka cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str + :param subject_name: The name of the Kafka schema subject. Required. 
+ :type subject_name: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -117860,127 +125543,12 @@ async def get_kafka_topic( # response body for status code(s): 200 response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. 
The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". 
The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. 
This ensures - that retention can delete or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } + "schema": "str", # Optional. The schema definition in the specified format. + "schema_id": 0, # Optional. The id for schema. + "schema_type": "str", # Optional. The type of the schema. Known values are: + "AVRO", "JSON", and "PROTOBUF". + "subject_name": "str", # Optional. The name of the schema subject. + "version": "str" # Optional. The version of the schema. 
} # response body for status code(s): 404 response == { @@ -118012,9 +125580,9 @@ async def get_kafka_topic( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_kafka_topic_request( + _request = build_databases_get_kafka_schema_request( database_cluster_uuid=database_cluster_uuid, - topic_name=topic_name, + subject_name=subject_name, headers=_headers, params=_params, ) @@ -118073,258 +125641,27 @@ async def get_kafka_topic( return cast(JSON, deserialized) # type: ignore - @overload - async def update_kafka_topic( - self, - database_cluster_uuid: str, - topic_name: str, - body: Optional[JSON] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: + @distributed_trace_async + async def delete_kafka_schema( + self, database_cluster_uuid: str, subject_name: str, **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Update Topic for a Kafka Cluster. - - To update a topic attached to a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + """Delete a Kafka Schema by Subject Name. - The result will be a JSON object with a ``topic`` key. + To delete a specific schema by subject name for a Kafka cluster, send a DELETE request to + ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :param body: Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON + :param subject_name: The name of the Kafka schema subject. Required. + :type subject_name: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. 
code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "config": { - "cleanup_policy": "delete", # Optional. Default value is "delete". - The cleanup_policy sets the retention policy to use on log segments. 'delete' - will discard old segments when retention time/size limits are reached. - 'compact' will enable log compaction, resulting in retention of the latest - value for each key. Known values are: "delete", "compact", and - "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the topic. - Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value is - 86400000. The delete_retention_ms specifies how long (in ms) to retain delete - tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is 60000. - The file_delete_delay_ms specifies the time (in ms) to wait before deleting a - file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_messages specifies the number of messages to - accumulate on a log partition before messages are flushed to disk. - "flush_ms": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a - message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is 4096. The - index_interval_bytes specifies the number of bytes between entries being - added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum - amount of time (in ms) that a message will remain uncompacted. This is only - applicable if the logs are have compaction enabled. - "max_message_bytes": 1048588, # Optional. 
Default value is 1048588. - The max_messages_bytes specifies the largest record batch size (in bytes) - that can be sent to the server. This is calculated after compression if - compression is enabled. - "message_down_conversion_enable": True, # Optional. Default value is - True. The message_down_conversion_enable specifies whether down-conversion of - message formats is enabled to satisfy consumer requests. When 'false', the - broker will not perform conversion for consumers expecting older message - formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for - consume requests from these older clients. - "message_format_version": "3.0-IV1", # Optional. Default value is - "3.0-IV1". The message_format_version specifies the message format version - used by the broker to append messages to the logs. The value of this setting - is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By - setting a particular message format version, all existing messages on disk - must be smaller or equal to the specified version. Known values are: "0.8.0", - "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", - "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", - "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", - "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", - "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", - "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default value - is "create_time". The message_timestamp_type specifies whether to use the - message create time or log append time as the timestamp on a message. Known - values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. 
- The min_cleanable_dirty_ratio specifies the frequency of log compaction (if - enabled) in relation to duplicates present in the logs. For example, at 0.5, - at most 50% of the log could be duplicates before compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. The - min_compaction_lag_ms specifies the minimum time (in ms) that a message will - remain uncompacted in the log. Only relevant if log compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. The - min_insync_replicas specifies the number of replicas that must ACK a write - for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. The - preallocate specifies whether a file should be preallocated on disk when - creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is 604800000. - The retention_ms specifies the maximum amount of time (in ms) to keep a - message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is 209715200. - The segment_bytes specifies the maximum size of a single log file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is 604800000. The - segment_ms specifies the period of time after which the log will be forced to - roll if the segment file isn't full. This ensures that retention can delete - or compact old data. - }, - "partition_count": 0, # Optional. The number of partitions available for the - topic. On update, this value can only be increased. - "replication_factor": 0 # Optional. 
The number of nodes to replicate data - across the cluster. - } - - # response body for status code(s): 200 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. 
- "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. 
The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. 
The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -118337,34 +125674,97 @@ async def update_kafka_topic( tickets to help identify the issue. } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @overload - async def update_kafka_topic( - self, - database_cluster_uuid: str, - topic_name: str, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", - **kwargs: Any + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_databases_delete_kafka_schema_request( + database_cluster_uuid=database_cluster_uuid, + subject_name=subject_name, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + 
+ response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_kafka_schema_version( + self, database_cluster_uuid: str, subject_name: str, version: str, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update Topic for a Kafka Cluster. - - To update a topic attached to a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + """Get Kafka Schema by Subject Version. - The result will be a JSON object with a ``topic`` key. + To get a specific schema by subject name for a Kafka cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME/versions/$VERSION``. 
:param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :param body: Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str + :param subject_name: The name of the Kafka schema subject. Required. + :type subject_name: str + :param version: The version of the Kafka schema subject. Required. + :type version: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -118374,127 +125774,12 @@ async def update_kafka_topic( # response body for status code(s): 200 response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. 
The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. 
Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. 
The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } + "schema": "str", # Optional. The schema definition in the specified format. + "schema_id": 0, # Optional. The id for schema. + "schema_type": "str", # Optional. The type of the schema. Known values are: + "AVRO", "JSON", and "PROTOBUF". + "subject_name": "str", # Optional. The name of the schema subject. + "version": "str" # Optional. The version of the schema. 
} # response body for status code(s): 404 response == { @@ -118508,253 +125793,112 @@ async def update_kafka_topic( tickets to help identify the issue. } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @distributed_trace_async - async def update_kafka_topic( - self, - database_cluster_uuid: str, - topic_name: str, - body: Optional[Union[JSON, IO[bytes]]] = None, - **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Update Topic for a Kafka Cluster. - - To update a topic attached to a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. - - The result will be a JSON object with a ``topic`` key. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. - :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} - # JSON input template you can fill out and use as your body input. - body = { - "config": { - "cleanup_policy": "delete", # Optional. Default value is "delete". - The cleanup_policy sets the retention policy to use on log segments. 'delete' - will discard old segments when retention time/size limits are reached. - 'compact' will enable log compaction, resulting in retention of the latest - value for each key. 
Known values are: "delete", "compact", and - "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the topic. - Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value is - 86400000. The delete_retention_ms specifies how long (in ms) to retain delete - tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is 60000. - The file_delete_delay_ms specifies the time (in ms) to wait before deleting a - file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_messages specifies the number of messages to - accumulate on a log partition before messages are flushed to disk. - "flush_ms": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a - message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is 4096. The - index_interval_bytes specifies the number of bytes between entries being - added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum - amount of time (in ms) that a message will remain uncompacted. This is only - applicable if the logs are have compaction enabled. - "max_message_bytes": 1048588, # Optional. Default value is 1048588. - The max_messages_bytes specifies the largest record batch size (in bytes) - that can be sent to the server. This is calculated after compression if - compression is enabled. - "message_down_conversion_enable": True, # Optional. Default value is - True. The message_down_conversion_enable specifies whether down-conversion of - message formats is enabled to satisfy consumer requests. 
When 'false', the - broker will not perform conversion for consumers expecting older message - formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for - consume requests from these older clients. - "message_format_version": "3.0-IV1", # Optional. Default value is - "3.0-IV1". The message_format_version specifies the message format version - used by the broker to append messages to the logs. The value of this setting - is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By - setting a particular message format version, all existing messages on disk - must be smaller or equal to the specified version. Known values are: "0.8.0", - "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", - "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", - "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", - "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", - "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", - "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default value - is "create_time". The message_timestamp_type specifies whether to use the - message create time or log append time as the timestamp on a message. Known - values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. - The min_cleanable_dirty_ratio specifies the frequency of log compaction (if - enabled) in relation to duplicates present in the logs. For example, at 0.5, - at most 50% of the log could be duplicates before compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. The - min_compaction_lag_ms specifies the minimum time (in ms) that a message will - remain uncompacted in the log. Only relevant if log compaction is enabled. - "min_insync_replicas": 1, # Optional. 
Default value is 1. The - min_insync_replicas specifies the number of replicas that must ACK a write - for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. The - preallocate specifies whether a file should be preallocated on disk when - creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is 604800000. - The retention_ms specifies the maximum amount of time (in ms) to keep a - message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is 209715200. - The segment_bytes specifies the maximum size of a single log file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is 604800000. The - segment_ms specifies the period of time after which the log will be forced to - roll if the segment file isn't full. This ensures that retention can delete - or compact old data. - }, - "partition_count": 0, # Optional. The number of partitions available for the - topic. On update, this value can only be increased. - "replication_factor": 0 # Optional. The number of nodes to replicate data - across the cluster. - } + cls: ClsType[JSON] = kwargs.pop("cls", None) - # response body for status code(s): 200 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. 
Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. 
When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. 
Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. 
The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } + _request = build_databases_get_kafka_schema_version_request( + database_cluster_uuid=database_cluster_uuid, + subject_name=subject_name, + version=version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), 
response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def get_kafka_schema_config( + self, database_cluster_uuid: str, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Retrieve Schema Registry Configuration for a kafka Cluster. + + To retrieve the Schema Registry configuration for a Kafka cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/schema-registry/config``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". 
} # response body for status code(s): 404 response == { @@ -118781,31 +125925,13 @@ async def update_kafka_topic( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[JSON] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None - - _request = build_databases_update_kafka_topic_request( + _request = build_databases_get_kafka_schema_config_request( database_cluster_uuid=database_cluster_uuid, - topic_name=topic_name, - content_type=content_type, - json=_json, - content=_content, headers=_headers, params=_params, ) @@ -118864,30 +125990,152 @@ async def update_kafka_topic( return cast(JSON, deserialized) # type: ignore - @distributed_trace_async - async def delete_kafka_topic( - self, database_cluster_uuid: str, topic_name: str, **kwargs: Any - ) -> Optional[JSON]: + @overload + async def update_kafka_schema_config( + self, + database_cluster_uuid: str, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Delete Topic for a Kafka Cluster. + """Update Schema Registry Configuration for a kafka Cluster. - To delete a single topic within a Kafka cluster, send a DELETE request - to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + To update the Schema Registry configuration for a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/schema-registry/config``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. 
- A status of 204 will be given. This indicates that the request was - processed successfully, but that no response body is needed. + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + } + + # response body for status code(s): 200 + response == { + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @overload + async def update_kafka_schema_config( + self, + database_cluster_uuid: str, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update Schema Registry Configuration for a kafka Cluster. + + To update the Schema Registry configuration for a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/schema-registry/config``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :return: JSON object or None - :rtype: JSON or None + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def update_kafka_schema_config( + self, + database_cluster_uuid: str, + body: Optional[Union[JSON, IO[bytes]]] = None, + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update Schema Registry Configuration for a kafka Cluster. + + To update the Schema Registry configuration for a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/schema-registry/config``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + } + + # response body for status code(s): 200 + response == { + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". 
+ } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -118913,14 +126161,30 @@ async def delete_kafka_topic( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_delete_kafka_topic_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_databases_update_kafka_schema_config_request( database_cluster_uuid=database_cluster_uuid, - topic_name=topic_name, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -118935,15 +126199,14 @@ async def delete_kafka_topic( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -118954,6 +126217,11 @@ async def delete_kafka_topic( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] 
= self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -118971,20 +126239,27 @@ async def delete_kafka_topic( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def list_logsink(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + async def get_kafka_schema_subject_config( + self, database_cluster_uuid: str, subject_name: str, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """List Logsinks for a Database Cluster. + """Retrieve Schema Registry Configuration for a Subject of kafka Cluster. - To list logsinks for a database cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/logsink``. + To retrieve the Schema Registry configuration for a Subject of a Kafka cluster, send a GET + request to + ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param subject_name: The name of the Kafka schema subject. Required. + :type subject_name: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -118994,16 +126269,10 @@ async def list_logsink(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "sinks": [ - { - "config": {}, - "sink_id": "str", # Optional. A unique identifier for - Logsink. - "sink_name": "str", # Optional. The name of the Logsink. - "sink_type": "str" # Optional. Known values are: "rsyslog", - "elasticsearch", and "opensearch". 
- } - ] + "compatibility_level": "str", # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "subject_name": "str" # The name of the schema subject. Required. } # response body for status code(s): 404 response == { @@ -119035,8 +126304,9 @@ async def list_logsink(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_logsink_request( + _request = build_databases_get_kafka_schema_subject_config_request( database_cluster_uuid=database_cluster_uuid, + subject_name=subject_name, headers=_headers, params=_params, ) @@ -119096,23 +126366,29 @@ async def list_logsink(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @overload - async def create_logsink( + async def update_kafka_schema_subject_config( self, database_cluster_uuid: str, - body: JSON, + subject_name: str, + body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Logsink for a Database Cluster. + """Update Schema Registry Configuration for a Subject of kafka Cluster. - To create logsink for a database cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/logsink``. + To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT + request to + ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. + :param subject_name: The name of the Kafka schema subject. Required. 
+ :type subject_name: str + :param body: Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -119126,25 +126402,17 @@ async def create_logsink( # JSON input template you can fill out and use as your body input. body = { - "config": {}, - "sink_name": "str", # Optional. The name of the Logsink. - "sink_type": "str" # Optional. Type of logsink integration. * Use - ``datadog`` for Datadog integration **only with MongoDB clusters**. * For - non-MongoDB clusters, use ``rsyslog`` for general syslog forwarding. * Other - supported types include ``elasticsearch`` and ``opensearch``. More details about - the configuration can be found in the ``config`` property. Known values are: - "rsyslog", "elasticsearch", "opensearch", and "datadog". + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". } - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "sink": { - "config": {}, - "sink_id": "str", # Optional. A unique identifier for Logsink. - "sink_name": "str", # Optional. The name of the Logsink. - "sink_type": "str" # Optional. Known values are: "rsyslog", - "elasticsearch", and "opensearch". - } + "compatibility_level": "str", # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "subject_name": "str" # The name of the schema subject. Required. 
} # response body for status code(s): 404 response == { @@ -119160,23 +126428,29 @@ async def create_logsink( """ @overload - async def create_logsink( + async def update_kafka_schema_subject_config( self, database_cluster_uuid: str, - body: IO[bytes], + subject_name: str, + body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Logsink for a Database Cluster. + """Update Schema Registry Configuration for a Subject of kafka Cluster. - To create logsink for a database cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/logsink``. + To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT + request to + ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. + :param subject_name: The name of the Kafka schema subject. Required. + :type subject_name: str + :param body: Default value is None. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". @@ -119188,15 +126462,12 @@ async def create_logsink( Example: .. code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "sink": { - "config": {}, - "sink_id": "str", # Optional. A unique identifier for Logsink. - "sink_name": "str", # Optional. The name of the Logsink. - "sink_type": "str" # Optional. Known values are: "rsyslog", - "elasticsearch", and "opensearch". - } + "compatibility_level": "str", # The compatibility level of the schema + registry. Required. 
Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "subject_name": "str" # The name of the schema subject. Required. } # response body for status code(s): 404 response == { @@ -119212,18 +126483,27 @@ async def create_logsink( """ @distributed_trace_async - async def create_logsink( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + async def update_kafka_schema_subject_config( + self, + database_cluster_uuid: str, + subject_name: str, + body: Optional[Union[JSON, IO[bytes]]] = None, + **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Logsink for a Database Cluster. + """Update Schema Registry Configuration for a Subject of kafka Cluster. - To create logsink for a database cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/logsink``. + To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT + request to + ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. + :param subject_name: The name of the Kafka schema subject. Required. + :type subject_name: str + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON @@ -119234,25 +126514,17 @@ async def create_logsink( # JSON input template you can fill out and use as your body input. body = { - "config": {}, - "sink_name": "str", # Optional. The name of the Logsink. - "sink_type": "str" # Optional. Type of logsink integration. * Use - ``datadog`` for Datadog integration **only with MongoDB clusters**. 
* For - non-MongoDB clusters, use ``rsyslog`` for general syslog forwarding. * Other - supported types include ``elasticsearch`` and ``opensearch``. More details about - the configuration can be found in the ``config`` property. Known values are: - "rsyslog", "elasticsearch", "opensearch", and "datadog". + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". } - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "sink": { - "config": {}, - "sink_id": "str", # Optional. A unique identifier for Logsink. - "sink_name": "str", # Optional. The name of the Logsink. - "sink_type": "str" # Optional. Known values are: "rsyslog", - "elasticsearch", and "opensearch". - } + "compatibility_level": "str", # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "subject_name": "str" # The name of the schema subject. Required. 
} # response body for status code(s): 404 response == { @@ -119293,10 +126565,14 @@ async def create_logsink( if isinstance(body, (IOBase, bytes)): _content = body else: - _json = body + if body is not None: + _json = body + else: + _json = None - _request = build_databases_create_logsink_request( + _request = build_databases_update_kafka_schema_subject_config_request( database_cluster_uuid=database_cluster_uuid, + subject_name=subject_name, content_type=content_type, json=_json, content=_content, @@ -119314,14 +126590,14 @@ async def create_logsink( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 201: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -119359,19 +126635,14 @@ async def create_logsink( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_logsink( - self, database_cluster_uuid: str, logsink_id: str, **kwargs: Any - ) -> JSON: + async def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Get Logsink for a Database Cluster. + """Retrieve Database Clusters' Metrics Endpoint Credentials. - To get a logsink for a database cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. + To show the credentials for all database clusters' metrics endpoints, send a GET request to + ``/v2/databases/metrics/credentials``. The result will be a JSON object with a ``credentials`` + key. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
- :type database_cluster_uuid: str - :param logsink_id: A unique identifier for a logsink of a database cluster. Required. - :type logsink_id: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -119381,11 +126652,14 @@ async def get_logsink( # response body for status code(s): 200 response == { - "config": {}, - "sink_id": "str", # Optional. A unique identifier for Logsink. - "sink_name": "str", # Optional. The name of the Logsink. - "sink_type": "str" # Optional. Known values are: "rsyslog", "elasticsearch", - and "opensearch". + "credentials": { + "credentials": { + "basic_auth_password": "str", # Optional. basic + authentication password for metrics HTTP endpoint. + "basic_auth_username": "str" # Optional. basic + authentication username for metrics HTTP endpoint. + } + } } # response body for status code(s): 404 response == { @@ -119417,9 +126691,7 @@ async def get_logsink( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_logsink_request( - database_cluster_uuid=database_cluster_uuid, - logsink_id=logsink_id, + _request = build_databases_get_cluster_metrics_credentials_request( headers=_headers, params=_params, ) @@ -119479,32 +126751,26 @@ async def get_logsink( return cast(JSON, deserialized) # type: ignore @overload - async def update_logsink( + async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements self, - database_cluster_uuid: str, - logsink_id: str, - body: JSON, + body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Update Logsink for a Database Cluster. + ) -> None: + """Update Database Clusters' Metrics Endpoint Credentials. - To update a logsink for a database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. 
+ To update the credentials for all database clusters' metrics endpoints, send a PUT request to + ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content + status code with no body in response. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param logsink_id: A unique identifier for a logsink of a database cluster. Required. - :type logsink_id: str - :param body: Required. + :param body: Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: None + :rtype: None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -119512,99 +126778,179 @@ async def update_logsink( # JSON input template you can fill out and use as your body input. body = { - "config": {} - } - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. + "credentials": { + "basic_auth_password": "str", # Optional. basic authentication + password for metrics HTTP endpoint. + "basic_auth_username": "str" # Optional. basic authentication + username for metrics HTTP endpoint. 
+ } } """ @overload - async def update_logsink( + async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements self, - database_cluster_uuid: str, - logsink_id: str, - body: IO[bytes], + body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Update Logsink for a Database Cluster. + ) -> None: + """Update Database Clusters' Metrics Endpoint Credentials. - To update a logsink for a database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. + To update the credentials for all database clusters' metrics endpoints, send a PUT request to + ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content + status code with no body in response. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param logsink_id: A unique identifier for a logsink of a database cluster. Required. - :type logsink_id: str - :param body: Required. + :param body: Default value is None. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> None: + """Update Database Clusters' Metrics Endpoint Credentials. + + To update the credentials for all database clusters' metrics endpoints, send a PUT request to + ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content + status code with no body in response. 
+ + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: None + :rtype: None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. + # JSON input template you can fill out and use as your body input. + body = { + "credentials": { + "basic_auth_password": "str", # Optional. basic authentication + password for metrics HTTP endpoint. + "basic_auth_username": "str" # Optional. basic authentication + username for metrics HTTP endpoint. 
+ } } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_databases_update_cluster_metrics_credentials_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if cls: + return 
cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def update_logsink( - self, - database_cluster_uuid: str, - logsink_id: str, - body: Union[JSON, IO[bytes]], - **kwargs: Any - ) -> Optional[JSON]: + async def list_opeasearch_indexes( + self, database_cluster_uuid: str, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Update Logsink for a Database Cluster. + """List Indexes for a OpenSearch Cluster. - To update a logsink for a database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. + To list all of a OpenSearch cluster's indexes, send a GET request to + ``/v2/databases/$DATABASE_ID/indexes``. + + The result will be a JSON object with a ``indexes`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param logsink_id: A unique identifier for a logsink of a database cluster. Required. - :type logsink_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "config": {} + # response body for status code(s): 200 + response == { + "indexes": [ + { + "created_time": "2020-02-20 00:00:00", # Optional. The date + and time the index was created. + "health": "str", # Optional. The health of the OpenSearch + index. Known values are: "unknown", "green", "yellow", "red", and "red*". + "index_name": "str", # Optional. The name of the opensearch + index. + "number_of_replicas": 0, # Optional. The number of replicas + for the index. + "number_of_shards": 0, # Optional. The number of shards for + the index. + "size": 0, # Optional. The size of the index. + "status": "str" # Optional. 
The status of the OpenSearch + index. Known values are: "unknown", "open", "close", and "none". + } + ] } - # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -119630,28 +126976,13 @@ async def update_logsink( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_update_logsink_request( + _request = build_databases_list_opeasearch_indexes_request( database_cluster_uuid=database_cluster_uuid, - logsink_id=logsink_id, - content_type=content_type, - json=_json, - content=_content, headers=_headers, params=_params, ) @@ -119672,7 +127003,6 @@ async def update_logsink( map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( @@ -119685,6 +127015,11 @@ async def update_logsink( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -119702,24 +127037,27 @@ async def update_logsink( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, 
cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def delete_logsink( - self, database_cluster_uuid: str, logsink_id: str, **kwargs: Any + async def delete_opensearch_index( + self, database_cluster_uuid: str, index_name: str, **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Delete Logsink for a Database Cluster. + """Delete Index for OpenSearch Cluster. - To delete a logsink for a database cluster, send a DELETE request to - ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. + To delete a single index within OpenSearch cluster, send a DELETE request + to ``/v2/databases/$DATABASE_ID/indexes/$INDEX_NAME``. + + A status of 204 will be given. This indicates that the request was + processed successfully, but that no response body is needed. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param logsink_id: A unique identifier for a logsink of a database cluster. Required. - :type logsink_id: str + :param index_name: The name of the OpenSearch index. Required. 
+ :type index_name: str :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -119757,9 +127095,9 @@ async def delete_logsink( cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_delete_logsink_request( + _request = build_databases_delete_opensearch_index_request( database_cluster_uuid=database_cluster_uuid, - logsink_id=logsink_id, + index_name=index_name, headers=_headers, params=_params, ) @@ -119774,7 +127112,7 @@ async def delete_logsink( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -119782,7 +127120,7 @@ async def delete_logsink( deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -119814,18 +127152,38 @@ async def delete_logsink( return deserialized # type: ignore + +class DedicatedInferencesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~pydo.aio.GeneratedClient`'s + :attr:`dedicated_inferences` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) + @distributed_trace_async - async def list_kafka_schemas( - self, database_cluster_uuid: str, **kwargs: Any - ) -> JSON: + async def get(self, dedicated_inference_id: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List Schemas for Kafka Cluster. + """Get a Dedicated Inference. - To list all schemas for a Kafka cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/schema-registry``. + Retrieve an existing Dedicated Inference by ID. Send a GET request to + ``/v2/dedicated-inferences/{dedicated_inference_id}``. The status in the response + is one of active, new, provisioning, updating, deleting, or error. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -119835,17 +127193,109 @@ async def list_kafka_schemas( # response body for status code(s): 200 response == { - "subjects": [ - { - "schema": "str", # Optional. The schema definition in the - specified format. - "schema_id": 0, # Optional. The id for schema. - "schema_type": "str", # Optional. The type of the schema. - Known values are: "AVRO", "JSON", and "PROTOBUF". - "subject_name": "str" # Optional. The name of the schema - subject. - } - ] + "dedicated_inference": { + "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was created. 
+ "endpoints": { + "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN + of the Dedicated Inference instance. + "public_endpoint_fqdn": "str" # Optional. Public FQDN of the + Dedicated Inference instance. + }, + "id": "str", # Optional. Unique ID of the Dedicated Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether to + expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated Inference. + Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the Dedicated + Inference is hosted. + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public + LLM endpoint. Required. 
+ "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be + unique within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated + Inference is hosted. Required. Known values are: "atl1", "nyc2", and + "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". + "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference. 
+ } } # response body for status code(s): 404 response == { @@ -119877,8 +127327,8 @@ async def list_kafka_schemas( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_kafka_schemas_request( - database_cluster_uuid=database_cluster_uuid, + _request = build_dedicated_inferences_get_request( + dedicated_inference_id=dedicated_inference_id, headers=_headers, params=_params, ) @@ -119938,22 +127388,24 @@ async def list_kafka_schemas( return cast(JSON, deserialized) # type: ignore @overload - async def create_kafka_schema( + async def patch( self, - database_cluster_uuid: str, + dedicated_inference_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Schema Registry for Kafka Cluster. + """Update a Dedicated Inference. - To create a Kafka schema for a database cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/schema-registry``. + Update an existing Dedicated Inference. Send a PATCH request to + ``/v2/dedicated-inferences/{dedicated_inference_id}`` with updated ``spec`` and/or + ``access_tokens``. Status will move to updating and return to active when done. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -119968,19 +127420,155 @@ async def create_kafka_schema( # JSON input template you can fill out and use as your body input. body = { - "schema": "str", # Optional. The schema definition in the specified format. - "schema_type": "str", # Optional. The type of the schema. Known values are: - "AVRO", "JSON", and "PROTOBUF". - "subject_name": "str" # Optional. The name of the schema subject. 
+ "access_tokens": { + "hugging_face_token": "str" # Optional. Hugging Face token required + for gated models. + }, + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public LLM + endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of accelerator + instances. Required. + "type": "str", # Accelerator type + (e.g. prefill_decode). Required. + "status": "str" # Optional. Current + state of the Accelerator. Known values are: "new", + "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to identify an + existing deployment when updating; empty means create new. + "model_provider": "str", # Optional. Model provider. + "hugging_face" + "model_slug": "str", # Optional. Model identifier + (e.g. Hugging Face slug). + "workload_config": {} # Optional. Workload-specific + configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be unique + within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated Inference + is hosted. Required. Known values are: "atl1", "nyc2", and "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated Inference. + Required. + } + } } - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "schema": "str", # Optional. The schema definition in the specified format. - "schema_id": 0, # Optional. The id for schema. - "schema_type": "str", # Optional. The type of the schema. Known values are: - "AVRO", "JSON", and "PROTOBUF". - "subject_name": "str" # Optional. The name of the schema subject. + "dedicated_inference": { + "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN + of the Dedicated Inference instance. 
+ "public_endpoint_fqdn": "str" # Optional. Public FQDN of the + Dedicated Inference instance. + }, + "id": "str", # Optional. Unique ID of the Dedicated Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether to + expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated Inference. + Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the Dedicated + Inference is hosted. + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public + LLM endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. 
+ "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be + unique within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated + Inference is hosted. Required. Known values are: "atl1", "nyc2", and + "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". + "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference. + } } # response body for status code(s): 404 response == { @@ -119996,22 +127584,24 @@ async def create_kafka_schema( """ @overload - async def create_kafka_schema( + async def patch( self, - database_cluster_uuid: str, + dedicated_inference_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Schema Registry for Kafka Cluster. + """Update a Dedicated Inference. - To create a Kafka schema for a database cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/schema-registry``. + Update an existing Dedicated Inference. 
Send a PATCH request to + ``/v2/dedicated-inferences/{dedicated_inference_id}`` with updated ``spec`` and/or + ``access_tokens``. Status will move to updating and return to active when done. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -120024,13 +127614,111 @@ async def create_kafka_schema( Example: .. code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "schema": "str", # Optional. The schema definition in the specified format. - "schema_id": 0, # Optional. The id for schema. - "schema_type": "str", # Optional. The type of the schema. Known values are: - "AVRO", "JSON", and "PROTOBUF". - "subject_name": "str" # Optional. The name of the schema subject. + "dedicated_inference": { + "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN + of the Dedicated Inference instance. + "public_endpoint_fqdn": "str" # Optional. Public FQDN of the + Dedicated Inference instance. + }, + "id": "str", # Optional. Unique ID of the Dedicated Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether to + expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. 
prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated Inference. + Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the Dedicated + Inference is hosted. + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public + LLM endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be + unique within the team. Required. 
+ "region": "str", # DigitalOcean region where the Dedicated + Inference is hosted. Required. Known values are: "atl1", "nyc2", and + "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". + "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference. + } } # response body for status code(s): 404 response == { @@ -120046,17 +127734,19 @@ async def create_kafka_schema( """ @distributed_trace_async - async def create_kafka_schema( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + async def patch( + self, dedicated_inference_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Schema Registry for Kafka Cluster. + """Update a Dedicated Inference. - To create a Kafka schema for a database cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/schema-registry``. + Update an existing Dedicated Inference. Send a PATCH request to + ``/v2/dedicated-inferences/{dedicated_inference_id}`` with updated ``spec`` and/or + ``access_tokens``. Status will move to updating and return to active when done. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object @@ -120068,19 +127758,155 @@ async def create_kafka_schema( # JSON input template you can fill out and use as your body input. body = { - "schema": "str", # Optional. 
The schema definition in the specified format. - "schema_type": "str", # Optional. The type of the schema. Known values are: - "AVRO", "JSON", and "PROTOBUF". - "subject_name": "str" # Optional. The name of the schema subject. + "access_tokens": { + "hugging_face_token": "str" # Optional. Hugging Face token required + for gated models. + }, + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public LLM + endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of accelerator + instances. Required. + "type": "str", # Accelerator type + (e.g. prefill_decode). Required. + "status": "str" # Optional. Current + state of the Accelerator. Known values are: "new", + "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to identify an + existing deployment when updating; empty means create new. + "model_provider": "str", # Optional. Model provider. + "hugging_face" + "model_slug": "str", # Optional. Model identifier + (e.g. Hugging Face slug). + "workload_config": {} # Optional. Workload-specific + configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be unique + within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated Inference + is hosted. Required. Known values are: "atl1", "nyc2", and "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated Inference. + Required. + } + } } - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "schema": "str", # Optional. The schema definition in the specified format. - "schema_id": 0, # Optional. The id for schema. - "schema_type": "str", # Optional. The type of the schema. Known values are: - "AVRO", "JSON", and "PROTOBUF". - "subject_name": "str" # Optional. The name of the schema subject. 
+ "dedicated_inference": { + "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN + of the Dedicated Inference instance. + "public_endpoint_fqdn": "str" # Optional. Public FQDN of the + Dedicated Inference instance. + }, + "id": "str", # Optional. Unique ID of the Dedicated Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether to + expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated Inference. + Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the Dedicated + Inference is hosted. 
+ "spec": { + "enable_public_endpoint": bool, # Whether to expose a public + LLM endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be + unique within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated + Inference is hosted. Required. Known values are: "atl1", "nyc2", and + "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". + "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference. 
+ } } # response body for status code(s): 404 response == { @@ -120123,8 +127949,8 @@ async def create_kafka_schema( else: _json = body - _request = build_databases_create_kafka_schema_request( - database_cluster_uuid=database_cluster_uuid, + _request = build_dedicated_inferences_patch_request( + dedicated_inference_id=dedicated_inference_id, content_type=content_type, json=_json, content=_content, @@ -120142,14 +127968,126 @@ async def create_kafka_schema( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [202, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 202: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def delete( + self, 
dedicated_inference_id: str, **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Delete a Dedicated Inference. + + Delete an existing Dedicated Inference. Send a DELETE request to + ``/v2/dedicated-inferences/{dedicated_inference_id}``. The response 202 Accepted + indicates the request was accepted for processing. + + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_dedicated_inferences_delete_request( + dedicated_inference_id=dedicated_inference_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 201: + if response.status_code == 202: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -120160,11 +128098,6 @@ async def create_kafka_schema( "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -120182,24 +128115,33 @@ async def create_kafka_schema( deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, 
deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @distributed_trace_async - async def get_kafka_schema( - self, database_cluster_uuid: str, subject_name: str, **kwargs: Any + async def list( + self, + *, + per_page: int = 20, + page: int = 1, + region: Optional[str] = None, + **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Get a Kafka Schema by Subject Name. + """List Dedicated Inferences. - To get a specific schema by subject name for a Kafka cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME``. + List all Dedicated Inference instances for your team. Send a GET request to + ``/v2/dedicated-inferences``. You may filter by region and use page and per_page + for pagination. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param subject_name: The name of the Kafka schema subject. Required. - :type subject_name: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :keyword region: Filter by region. Dedicated Inference is only available in nyc2, tor1, and + atl1. Known values are: "nyc2", "tor1", and "atl1". Default value is None. + :paramtype region: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -120209,23 +128151,122 @@ async def get_kafka_schema( # response body for status code(s): 200 response == { - "schema": "str", # Optional. The schema definition in the specified format. - "schema_id": 0, # Optional. The id for schema. - "schema_type": "str", # Optional. The type of the schema. Known values are: - "AVRO", "JSON", and "PROTOBUF". - "subject_name": "str", # Optional. The name of the schema subject. - "version": "str" # Optional. 
The version of the schema. - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. + "dedicated_inferences": [ + { + "created_at": "2020-02-20 00:00:00", # Optional. When the + Dedicated Inference was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. Private + VPC FQDN of the Dedicated Inference instance. + "public_endpoint_fqdn": "str" # Optional. Public + FQDN of the Dedicated Inference instance. + }, + "id": "str", # Optional. Unique ID of the Dedicated + Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. + Pending deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether + to expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": + "str", # DigitalOcean GPU slug. Required. + "scale": 0, # Number + of accelerator instances. Required. + "type": "str", # + Accelerator type (e.g. prefill_decode). Required. + "status": "str" # + Optional. Current state of the Accelerator. Known + values are: "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used + to identify an existing deployment when updating; empty means + create new. + "model_provider": "str", # Optional. + Model provider. "hugging_face" + "model_slug": "str", # Optional. + Model identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. 
+ Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated + Inference. Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. + Pending deployment when status is provisioning or updating. + "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the + Dedicated Inference is hosted. + "spec": { + "enable_public_endpoint": bool, # Whether to expose + a public LLM endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": + "str", # DigitalOcean GPU slug. Required. + "scale": 0, # Number + of accelerator instances. Required. + "type": "str", # + Accelerator type (e.g. prefill_decode). Required. + "status": "str" # + Optional. Current state of the Accelerator. Known + values are: "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used + to identify an existing deployment when updating; empty means + create new. + "model_provider": "str", # Optional. + Model provider. "hugging_face" + "model_slug": "str", # Optional. + Model identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. + Must be unique within the team. Required. + "region": "str", # DigitalOcean region where the + Dedicated Inference is hosted. Required. Known values are: "atl1", + "nyc2", and "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". 
+ "updated_at": "2020-02-20 00:00:00", # Optional. When the + Dedicated Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated + Inference. + } + ], + "links": { + "pages": { + "str": "str" # Optional. Pagination links (first, prev, + next, last). + } + }, + "meta": { + "total": 0 # Total number of results. Required. + } } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -120246,9 +128287,10 @@ async def get_kafka_schema( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_kafka_schema_request( - database_cluster_uuid=database_cluster_uuid, - subject_name=subject_name, + _request = build_dedicated_inferences_list_request( + per_page=per_page, + page=page, + region=region, headers=_headers, params=_params, ) @@ -120263,81 +128305,543 @@ async def get_kafka_schema( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [200]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = 
self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore return cast(JSON, deserialized) # type: ignore + @overload + async def create( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a Dedicated Inference. + + Create a new Dedicated Inference for your team. Send a POST request to + ``/v2/dedicated-inferences`` with a ``spec`` object (version, name, region, vpc, + enable_public_endpoint, model_deployments) and optional ``access_tokens`` (e.g. + hugging_face_token for gated models). The response code 202 Accepted indicates + the request was accepted for processing; it does not indicate success or failure. + The token value is returned only on create; store it securely. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public LLM + endpoint. Required. 
+ "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of accelerator + instances. Required. + "type": "str", # Accelerator type + (e.g. prefill_decode). Required. + "status": "str" # Optional. Current + state of the Accelerator. Known values are: "new", + "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to identify an + existing deployment when updating; empty means create new. + "model_provider": "str", # Optional. Model provider. + "hugging_face" + "model_slug": "str", # Optional. Model identifier + (e.g. Hugging Face slug). + "workload_config": {} # Optional. Workload-specific + configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be unique + within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated Inference + is hosted. Required. Known values are: "atl1", "nyc2", and "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated Inference. + Required. + } + }, + "access_tokens": { + "str": "str" # Optional. Key-value pairs for provider tokens (e.g. + Hugging Face). + } + } + + # response body for status code(s): 202 + response == { + "dedicated_inference": { + "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN + of the Dedicated Inference instance. + "public_endpoint_fqdn": "str" # Optional. Public FQDN of the + Dedicated Inference instance. + }, + "id": "str", # Optional. Unique ID of the Dedicated Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether to + expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. 
+ "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated Inference. + Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the Dedicated + Inference is hosted. + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public + LLM endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. 
Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be + unique within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated + Inference is hosted. Required. Known values are: "atl1", "nyc2", and + "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". + "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference. + }, + "token": { + "created_at": "2020-02-20 00:00:00", # Optional. Access token for + authenticating to Dedicated Inference endpoints. + "id": "str", # Optional. Unique ID of the token. + "name": "str", # Optional. Name of the token. + "value": "str" # Optional. Token value; only returned once on + create. Store securely. + } + } + """ + + @overload + async def create( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a Dedicated Inference. + + Create a new Dedicated Inference for your team. Send a POST request to + ``/v2/dedicated-inferences`` with a ``spec`` object (version, name, region, vpc, + enable_public_endpoint, model_deployments) and optional ``access_tokens`` (e.g. + hugging_face_token for gated models). The response code 202 Accepted indicates + the request was accepted for processing; it does not indicate success or failure. + The token value is returned only on create; store it securely. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 202 + response == { + "dedicated_inference": { + "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN + of the Dedicated Inference instance. + "public_endpoint_fqdn": "str" # Optional. Public FQDN of the + Dedicated Inference instance. + }, + "id": "str", # Optional. Unique ID of the Dedicated Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether to + expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated Inference. + Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. 
Pending + deployment when status is provisioning or updating. + "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the Dedicated + Inference is hosted. + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public + LLM endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be + unique within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated + Inference is hosted. Required. Known values are: "atl1", "nyc2", and + "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". + "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference. + }, + "token": { + "created_at": "2020-02-20 00:00:00", # Optional. Access token for + authenticating to Dedicated Inference endpoints. 
+ "id": "str", # Optional. Unique ID of the token. + "name": "str", # Optional. Name of the token. + "value": "str" # Optional. Token value; only returned once on + create. Store securely. + } + } + """ + @distributed_trace_async - async def delete_kafka_schema( - self, database_cluster_uuid: str, subject_name: str, **kwargs: Any - ) -> Optional[JSON]: + async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Delete a Kafka Schema by Subject Name. + """Create a Dedicated Inference. - To delete a specific schema by subject name for a Kafka cluster, send a DELETE request to - ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME``. + Create a new Dedicated Inference for your team. Send a POST request to + ``/v2/dedicated-inferences`` with a ``spec`` object (version, name, region, vpc, + enable_public_endpoint, model_deployments) and optional ``access_tokens`` (e.g. + hugging_face_token for gated models). The response code 202 Accepted indicates + the request was accepted for processing; it does not indicate success or failure. + The token value is returned only on create; store it securely. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param subject_name: The name of the Kafka schema subject. Required. - :type subject_name: str - :return: JSON object or None - :rtype: JSON or None + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 404 + # JSON input template you can fill out and use as your body input. + body = { + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public LLM + endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. 
Required. + "scale": 0, # Number of accelerator + instances. Required. + "type": "str", # Accelerator type + (e.g. prefill_decode). Required. + "status": "str" # Optional. Current + state of the Accelerator. Known values are: "new", + "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to identify an + existing deployment when updating; empty means create new. + "model_provider": "str", # Optional. Model provider. + "hugging_face" + "model_slug": "str", # Optional. Model identifier + (e.g. Hugging Face slug). + "workload_config": {} # Optional. Workload-specific + configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be unique + within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated Inference + is hosted. Required. Known values are: "atl1", "nyc2", and "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated Inference. + Required. + } + }, + "access_tokens": { + "str": "str" # Optional. Key-value pairs for provider tokens (e.g. + Hugging Face). + } + } + + # response body for status code(s): 202 response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. + "dedicated_inference": { + "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN + of the Dedicated Inference instance. + "public_endpoint_fqdn": "str" # Optional. 
Public FQDN of the + Dedicated Inference instance. + }, + "id": "str", # Optional. Unique ID of the Dedicated Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether to + expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated Inference. + Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the Dedicated + Inference is hosted. + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public + LLM endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. 
prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be + unique within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated + Inference is hosted. Required. Known values are: "atl1", "nyc2", and + "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". + "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference. + }, + "token": { + "created_at": "2020-02-20 00:00:00", # Optional. Access token for + authenticating to Dedicated Inference endpoints. + "id": "str", # Optional. Unique ID of the token. + "name": "str", # Optional. Name of the token. + "value": "str" # Optional. Token value; only returned once on + create. Store securely. 
+ } } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -120353,14 +128857,26 @@ async def delete_kafka_schema( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_delete_kafka_schema_request( - database_cluster_uuid=database_cluster_uuid, - subject_name=subject_name, + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_dedicated_inferences_create_request( + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -120375,62 +128891,59 @@ async def delete_kafka_schema( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [202]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - 
response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_kafka_schema_version( - self, database_cluster_uuid: str, subject_name: str, version: str, **kwargs: Any + async def list_accelerators( + self, + dedicated_inference_id: str, + *, + per_page: int = 20, + page: int = 1, + slug: Optional[str] = None, + **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Get Kafka Schema by Subject Version. + """List Dedicated Inference Accelerators. - To get a specific schema by subject name for a Kafka cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME/versions/$VERSION``. + List all accelerators (GPUs) in use by a Dedicated Inference instance. Send a + GET request to ``/v2/dedicated-inferences/{dedicated_inference_id}/accelerators``. + Optionally filter by slug and use page/per_page for pagination. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
- :type database_cluster_uuid: str - :param subject_name: The name of the Kafka schema subject. Required. - :type subject_name: str - :param version: The version of the Kafka schema subject. Required. - :type version: str + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :keyword slug: Filter accelerators by GPU slug. Default value is None. + :paramtype slug: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -120440,12 +128953,23 @@ async def get_kafka_schema_version( # response body for status code(s): 200 response == { - "schema": "str", # Optional. The schema definition in the specified format. - "schema_id": 0, # Optional. The id for schema. - "schema_type": "str", # Optional. The type of the schema. Known values are: - "AVRO", "JSON", and "PROTOBUF". - "subject_name": "str", # Optional. The name of the schema subject. - "version": "str" # Optional. The version of the schema. + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "accelerators": [ + { + "created_at": "2020-02-20 00:00:00", # Optional. + "id": "str", # Optional. Unique ID of the accelerator. + "name": "str", # Optional. Name of the accelerator. + "role": "str", # Optional. Role of the accelerator (e.g. + prefill_decode). + "slug": "str", # Optional. DigitalOcean GPU slug. + "status": "str" # Optional. Status of the accelerator. 
+ } + ], + "links": { + "pages": {} + } } # response body for status code(s): 404 response == { @@ -120477,10 +129001,11 @@ async def get_kafka_schema_version( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_kafka_schema_version_request( - database_cluster_uuid=database_cluster_uuid, - subject_name=subject_name, - version=version, + _request = build_dedicated_inferences_list_accelerators_request( + dedicated_inference_id=dedicated_inference_id, + per_page=per_page, + page=page, + slug=slug, headers=_headers, params=_params, ) @@ -120540,19 +129065,21 @@ async def get_kafka_schema_version( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_kafka_schema_config( - self, database_cluster_uuid: str, **kwargs: Any + async def get_accelerator( + self, dedicated_inference_id: str, accelerator_id: str, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Retrieve Schema Registry Configuration for a kafka Cluster. + """Get a Dedicated Inference Accelerator. - To retrieve the Schema Registry configuration for a Kafka cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/schema-registry/config``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. + Retrieve a single accelerator by ID for a Dedicated Inference instance. Send a + GET request to + ``/v2/dedicated-inferences/{dedicated_inference_id}/accelerators/{accelerator_id}``. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str + :param accelerator_id: A unique identifier for a Dedicated Inference accelerator. Required. 
+ :type accelerator_id: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -120562,9 +129089,12 @@ async def get_kafka_schema_config( # response body for status code(s): 200 response == { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "created_at": "2020-02-20 00:00:00", # Optional. + "id": "str", # Optional. Unique ID of the accelerator. + "name": "str", # Optional. Name of the accelerator. + "role": "str", # Optional. Role of the accelerator (e.g. prefill_decode). + "slug": "str", # Optional. DigitalOcean GPU slug. + "status": "str" # Optional. Status of the accelerator. } # response body for status code(s): 404 response == { @@ -120596,8 +129126,9 @@ async def get_kafka_schema_config( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_kafka_schema_config_request( - database_cluster_uuid=database_cluster_uuid, + _request = build_dedicated_inferences_get_accelerator_request( + dedicated_inference_id=dedicated_inference_id, + accelerator_id=accelerator_id, headers=_headers, params=_params, ) @@ -120656,132 +129187,18 @@ async def get_kafka_schema_config( return cast(JSON, deserialized) # type: ignore - @overload - async def update_kafka_schema_config( - self, - database_cluster_uuid: str, - body: Optional[JSON] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Update Schema Registry Configuration for a kafka Cluster. - - To update the Schema Registry configuration for a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/schema-registry/config``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. 
- - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - } - - # response body for status code(s): 200 - response == { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - async def update_kafka_schema_config( - self, - database_cluster_uuid: str, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Update Schema Registry Configuration for a kafka Cluster. 
- - To update the Schema Registry configuration for a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/schema-registry/config``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - @distributed_trace_async - async def update_kafka_schema_config( - self, - database_cluster_uuid: str, - body: Optional[Union[JSON, IO[bytes]]] = None, - **kwargs: Any - ) -> JSON: + async def get_ca(self, dedicated_inference_id: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Update Schema Registry Configuration for a kafka Cluster. 
+ """Get Dedicated Inference CA Certificate. - To update the Schema Registry configuration for a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/schema-registry/config``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. + Get the CA certificate for a Dedicated Inference instance (base64-encoded). + Required for private endpoint connectivity. Send a GET request to + ``/v2/dedicated-inferences/{dedicated_inference_id}/ca``. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. - :type body: JSON or IO[bytes] + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -120789,18 +129206,9 @@ async def update_kafka_schema_config( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - } - # response body for status code(s): 200 response == { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "cert": "str" # Base64-encoded CA certificate. Required. 
} # response body for status code(s): 404 response == { @@ -120827,30 +129235,13 @@ async def update_kafka_schema_config( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[JSON] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None - - _request = build_databases_update_kafka_schema_config_request( - database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, + _request = build_dedicated_inferences_get_ca_request( + dedicated_inference_id=dedicated_inference_id, headers=_headers, params=_params, ) @@ -120910,22 +129301,28 @@ async def update_kafka_schema_config( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_kafka_schema_subject_config( - self, database_cluster_uuid: str, subject_name: str, **kwargs: Any + async def list_tokens( + self, + dedicated_inference_id: str, + *, + per_page: int = 20, + page: int = 1, + **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Retrieve Schema Registry Configuration for a Subject of kafka Cluster. + """List Dedicated Inference Tokens. - To retrieve the Schema Registry configuration for a Subject of a Kafka cluster, send a GET - request to - ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. + List all access tokens for a Dedicated Inference instance. Token values are + not returned; only id, name, and created_at. 
Send a GET request to + ``/v2/dedicated-inferences/{dedicated_inference_id}/tokens``. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param subject_name: The name of the Kafka schema subject. Required. - :type subject_name: str + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -120935,10 +129332,21 @@ async def get_kafka_schema_subject_config( # response body for status code(s): 200 response == { - "compatibility_level": "str", # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - "subject_name": "str" # The name of the schema subject. Required. + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "links": { + "pages": {} + }, + "tokens": [ + { + "created_at": "2020-02-20 00:00:00", # Optional. + "id": "str", # Optional. Unique ID of the token. + "name": "str", # Optional. Name of the token. + "value": "str" # Optional. Token value; only returned once + on create. Store securely. 
+ } + ] } # response body for status code(s): 404 response == { @@ -120970,9 +129378,10 @@ async def get_kafka_schema_subject_config( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_kafka_schema_subject_config_request( - database_cluster_uuid=database_cluster_uuid, - subject_name=subject_name, + _request = build_dedicated_inferences_list_tokens_request( + dedicated_inference_id=dedicated_inference_id, + per_page=per_page, + page=page, headers=_headers, params=_params, ) @@ -121032,29 +129441,25 @@ async def get_kafka_schema_subject_config( return cast(JSON, deserialized) # type: ignore @overload - async def update_kafka_schema_subject_config( + async def create_tokens( self, - database_cluster_uuid: str, - subject_name: str, - body: Optional[JSON] = None, + dedicated_inference_id: str, + body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update Schema Registry Configuration for a Subject of kafka Cluster. + """Create a Dedicated Inference Token. - To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT - request to - ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. + Create a new access token for a Dedicated Inference instance. Send a POST + request to ``/v2/dedicated-inferences/{dedicated_inference_id}/tokens`` with a + ``name``. The token value is returned only once in the response; store it securely. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param subject_name: The name of the Kafka schema subject. Required. - :type subject_name: str - :param body: Default value is None. + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. 
+ :type dedicated_inference_id: str + :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -121068,17 +129473,19 @@ async def update_kafka_schema_subject_config( # JSON input template you can fill out and use as your body input. body = { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "name": "str" # Name for the new token. Required. } - # response body for status code(s): 200 + # response body for status code(s): 202 response == { - "compatibility_level": "str", # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - "subject_name": "str" # The name of the schema subject. Required. + "token": { + "created_at": "2020-02-20 00:00:00", # Optional. Access token for + authenticating to Dedicated Inference endpoints. + "id": "str", # Optional. Unique ID of the token. + "name": "str", # Optional. Name of the token. + "value": "str" # Optional. Token value; only returned once on + create. Store securely. + } } # response body for status code(s): 404 response == { @@ -121094,29 +129501,25 @@ async def update_kafka_schema_subject_config( """ @overload - async def update_kafka_schema_subject_config( + async def create_tokens( self, - database_cluster_uuid: str, - subject_name: str, - body: Optional[IO[bytes]] = None, + dedicated_inference_id: str, + body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update Schema Registry Configuration for a Subject of kafka Cluster. + """Create a Dedicated Inference Token. 
- To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT - request to - ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. + Create a new access token for a Dedicated Inference instance. Send a POST + request to ``/v2/dedicated-inferences/{dedicated_inference_id}/tokens`` with a + ``name``. The token value is returned only once in the response; store it securely. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param subject_name: The name of the Kafka schema subject. Required. - :type subject_name: str - :param body: Default value is None. + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str + :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". @@ -121128,12 +129531,16 @@ async def update_kafka_schema_subject_config( Example: .. code-block:: python - # response body for status code(s): 200 + # response body for status code(s): 202 response == { - "compatibility_level": "str", # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - "subject_name": "str" # The name of the schema subject. Required. + "token": { + "created_at": "2020-02-20 00:00:00", # Optional. Access token for + authenticating to Dedicated Inference endpoints. + "id": "str", # Optional. Unique ID of the token. + "name": "str", # Optional. Name of the token. + "value": "str" # Optional. Token value; only returned once on + create. Store securely. 
+ } } # response body for status code(s): 404 response == { @@ -121149,27 +129556,20 @@ async def update_kafka_schema_subject_config( """ @distributed_trace_async - async def update_kafka_schema_subject_config( - self, - database_cluster_uuid: str, - subject_name: str, - body: Optional[Union[JSON, IO[bytes]]] = None, - **kwargs: Any + async def create_tokens( + self, dedicated_inference_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update Schema Registry Configuration for a Subject of kafka Cluster. + """Create a Dedicated Inference Token. - To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT - request to - ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. + Create a new access token for a Dedicated Inference instance. Send a POST + request to ``/v2/dedicated-inferences/{dedicated_inference_id}/tokens`` with a + ``name``. The token value is returned only once in the response; store it securely. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param subject_name: The name of the Kafka schema subject. Required. - :type subject_name: str - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON @@ -121180,17 +129580,19 @@ async def update_kafka_schema_subject_config( # JSON input template you can fill out and use as your body input. body = { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. 
Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "name": "str" # Name for the new token. Required. } - # response body for status code(s): 200 + # response body for status code(s): 202 response == { - "compatibility_level": "str", # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - "subject_name": "str" # The name of the schema subject. Required. + "token": { + "created_at": "2020-02-20 00:00:00", # Optional. Access token for + authenticating to Dedicated Inference endpoints. + "id": "str", # Optional. Unique ID of the token. + "name": "str", # Optional. Name of the token. + "value": "str" # Optional. Token value; only returned once on + create. Store securely. + } } # response body for status code(s): 404 response == { @@ -121231,14 +129633,10 @@ async def update_kafka_schema_subject_config( if isinstance(body, (IOBase, bytes)): _content = body else: - if body is not None: - _json = body - else: - _json = None + _json = body - _request = build_databases_update_kafka_schema_subject_config_request( - database_cluster_uuid=database_cluster_uuid, - subject_name=subject_name, + _request = build_dedicated_inferences_create_tokens_request( + dedicated_inference_id=dedicated_inference_id, content_type=content_type, json=_json, content=_content, @@ -121256,14 +129654,14 @@ async def update_kafka_schema_subject_config( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [202, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: + if response.status_code == 202: 
response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -121301,32 +129699,27 @@ async def update_kafka_schema_subject_config( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON: + async def delete_tokens( + self, dedicated_inference_id: str, token_id: str, **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Retrieve Database Clusters' Metrics Endpoint Credentials. + """Revoke a Dedicated Inference Token. - To show the credentials for all database clusters' metrics endpoints, send a GET request to - ``/v2/databases/metrics/credentials``. The result will be a JSON object with a ``credentials`` - key. + Revoke (delete) an access token for a Dedicated Inference instance. Send a + DELETE request to ``/v2/dedicated-inferences/{dedicated_inference_id}/tokens/{token_id}``. - :return: JSON object - :rtype: JSON + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str + :param token_id: A unique identifier for a Dedicated Inference access token. Required. + :type token_id: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "credentials": { - "credentials": { - "basic_auth_password": "str", # Optional. basic - authentication password for metrics HTTP endpoint. - "basic_auth_username": "str" # Optional. basic - authentication username for metrics HTTP endpoint. 
- } - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -121355,9 +129748,11 @@ async def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_get_cluster_metrics_credentials_request( + _request = build_dedicated_inferences_delete_tokens_request( + dedicated_inference_id=dedicated_inference_id, + token_id=token_id, headers=_headers, params=_params, ) @@ -121372,14 +129767,15 @@ async def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -121390,11 +129786,6 @@ async def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON: "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -121412,98 +129803,37 @@ async def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON: deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @overload - 
async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements - self, - body: Optional[JSON] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> None: - """Update Database Clusters' Metrics Endpoint Credentials. - - To update the credentials for all database clusters' metrics endpoints, send a PUT request to - ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content - status code with no body in response. - - :param body: Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "credentials": { - "basic_auth_password": "str", # Optional. basic authentication - password for metrics HTTP endpoint. - "basic_auth_username": "str" # Optional. basic authentication - username for metrics HTTP endpoint. - } - } - """ - - @overload - async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements - self, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> None: - """Update Database Clusters' Metrics Endpoint Credentials. - - To update the credentials for all database clusters' metrics endpoints, send a PUT request to - ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content - status code with no body in response. + return cls(pipeline_response, deserialized, response_headers) # type: ignore - :param body: Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". 
- :paramtype content_type: str - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ + return deserialized # type: ignore @distributed_trace_async - async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements - self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any - ) -> None: - """Update Database Clusters' Metrics Endpoint Credentials. + async def list_sizes(self, **kwargs: Any) -> JSON: + """List Dedicated Inference Sizes. - To update the credentials for all database clusters' metrics endpoints, send a PUT request to - ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content - status code with no body in response. + Get available Dedicated Inference sizes and pricing for supported GPUs. Send a + GET request to ``/v2/dedicated-inferences/sizes``. - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. - :type body: JSON or IO[bytes] - :return: None - :rtype: None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "credentials": { - "basic_auth_password": "str", # Optional. basic authentication - password for metrics HTTP endpoint. - "basic_auth_username": "str" # Optional. basic authentication - username for metrics HTTP endpoint. - } + # response body for status code(s): 200 + response == { + "enabled_regions": [ + "str" # Optional. Regions where Dedicated Inference is available. + ], + "sizes": [ + { + "currency": "str", # Optional. + "gpu_slug": "str", # Optional. + "price_per_hour": "str", # Optional. + "region": "str" # Optional. 
+ } + ] } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -121519,29 +129849,12 @@ async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-re } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[None] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_update_cluster_metrics_credentials_request( - content_type=content_type, - json=_json, - content=_content, + _request = build_dedicated_inferences_list_sizes_request( headers=_headers, params=_params, ) @@ -121556,7 +129869,7 @@ async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-re response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [200]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -121573,23 +129886,24 @@ async def update_cluster_metrics_credentials( # pylint: disable=inconsistent-re "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if cls: - return cls(pipeline_response, None, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - @distributed_trace_async - async def list_opeasearch_indexes( - self, database_cluster_uuid: str, **kwargs: Any - ) -> JSON: - # pylint: 
disable=line-too-long - """List Indexes for a OpenSearch Cluster. + return cast(JSON, deserialized) # type: ignore - To list all of a OpenSearch cluster's indexes, send a GET request to - ``/v2/databases/$DATABASE_ID/indexes``. + @distributed_trace_async + async def get_gpu_model_config(self, **kwargs: Any) -> JSON: + """Get Dedicated Inference GPU Model Config. - The result will be a JSON object with a ``indexes`` key. + Get supported GPU and model configurations for Dedicated Inference. Use this to + discover supported GPU slugs and model slugs (e.g. Hugging Face). Send a GET + request to ``/v2/dedicated-inferences/gpu-model-config``. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -121599,35 +129913,18 @@ async def list_opeasearch_indexes( # response body for status code(s): 200 response == { - "indexes": [ + "gpu_model_configs": [ { - "created_time": "2020-02-20 00:00:00", # Optional. The date - and time the index was created. - "health": "str", # Optional. The health of the OpenSearch - index. Known values are: "unknown", "green", "yellow", "red", and "red*". - "index_name": "str", # Optional. The name of the opensearch - index. - "number_of_replicas": 0, # Optional. The number of replicas - for the index. - "number_of_shards": 0, # Optional. The number of shards for - the index. - "size": 0, # Optional. The size of the index. - "status": "str" # Optional. The status of the OpenSearch - index. Known values are: "unknown", "open", "close", and "none". + "gpu_slugs": [ + "str" # Optional. + ], + "is_gated_model": bool, # Optional. Whether the model + requires gated access (e.g. Hugging Face token). + "model_name": "str", # Optional. + "model_slug": "str" # Optional. 
} ] } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { 404: ResourceNotFoundError, @@ -121647,8 +129944,7 @@ async def list_opeasearch_indexes( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_opeasearch_indexes_request( - database_cluster_uuid=database_cluster_uuid, + _request = build_dedicated_inferences_get_gpu_model_config_request( headers=_headers, params=_params, ) @@ -121663,160 +129959,32 @@ async def list_opeasearch_indexes( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [200]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - 
"int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @distributed_trace_async - async def delete_opensearch_index( - self, database_cluster_uuid: str, index_name: str, **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Delete Index for OpenSearch Cluster. - - To delete a single index within OpenSearch cluster, send a DELETE request - to ``/v2/databases/$DATABASE_ID/indexes/$INDEX_NAME``. - - A status of 204 will be given. This indicates that the request was - processed successfully, but that no response body is needed. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param index_name: The name of the OpenSearch index. Required. - :type index_name: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - - _request = build_databases_delete_opensearch_index_request( - database_cluster_uuid=database_cluster_uuid, - index_name=index_name, - headers=_headers, - params=_params, + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") ) - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - if _stream: - await response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - deserialized = None - response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", 
response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore class DomainsOperations: @@ -136325,6 +144493,897 @@ async def delete_trigger( return deserialized # type: ignore +class FunctionsAccessKeyOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~pydo.aio.GeneratedClient`'s + :attr:`functions_access_key` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) + + @distributed_trace_async + async def list(self, namespace_id: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """List Namespace Access Keys. + + Lists all access keys for a serverless functions namespace. + + To list access keys, send a GET request to ``/v2/functions/namespaces/{namespace_id}/keys``. 
+ + :param namespace_id: The ID of the namespace to be managed. Required. + :type namespace_id: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "access_keys": [ + { + "created_at": "2020-02-20 00:00:00", # Optional. The date + and time the key was created. + "expires_at": "2020-02-20 00:00:00", # Optional. When the + key expires (null for non-expiring keys). + "id": "str", # Optional. The access key's unique identifier + with prefix 'dof"" *v1*"" '. + "name": "str", # Optional. The access key's name. + "updated_at": "2020-02-20 00:00:00" # Optional. The date and + time the key was last updated. + } + ], + "count": 0 # Optional. Total number of access keys. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_functions_access_key_list_request( + namespace_id=namespace_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + 
response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + async def create( + self, + namespace_id: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a Namespace Access Key. + + Creates a new access key for a serverless functions namespace. + The access key can be used to authenticate requests to the namespace's functions. + The secret key is only returned once upon creation. + + To create an access key, send a POST request to + ``/v2/functions/namespaces/{namespace_id}/keys``. + + :param namespace_id: The ID of the namespace to be managed. Required. + :type namespace_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # The access key's name. Required. + "expires_in": "str" # Optional. The duration after which the access key + expires, specified as a human-readable duration string in the format ``h`` + (hours) or ``d`` (days). Minimum value is ``1h``. If omitted, the key will + never expire. + } + + # response body for status code(s): 201 + response == { + "access_key": { + "created_at": "2020-02-20 00:00:00", # Optional. The date and time + the key was created. + "expires_at": "2020-02-20 00:00:00", # Optional. When the key + expires (null for non-expiring keys). 
+ "id": "str", # Optional. The access key's unique identifier with + prefix 'dof"" *v1*"" '. + "name": "str", # Optional. The access key's name. + "secret": "str", # Optional. The secret key used to authenticate. + This is only returned once upon creation. Make sure to copy and securely + store it. + "updated_at": "2020-02-20 00:00:00" # Optional. The date and time + the key was last updated. + } + } + # response body for status code(s): 400, 404, 409 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def create( + self, + namespace_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a Namespace Access Key. + + Creates a new access key for a serverless functions namespace. + The access key can be used to authenticate requests to the namespace's functions. + The secret key is only returned once upon creation. + + To create an access key, send a POST request to + ``/v2/functions/namespaces/{namespace_id}/keys``. + + :param namespace_id: The ID of the namespace to be managed. Required. + :type namespace_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 201 + response == { + "access_key": { + "created_at": "2020-02-20 00:00:00", # Optional. The date and time + the key was created. + "expires_at": "2020-02-20 00:00:00", # Optional. When the key + expires (null for non-expiring keys). + "id": "str", # Optional. The access key's unique identifier with + prefix 'dof"" *v1*"" '. + "name": "str", # Optional. The access key's name. + "secret": "str", # Optional. The secret key used to authenticate. + This is only returned once upon creation. Make sure to copy and securely + store it. + "updated_at": "2020-02-20 00:00:00" # Optional. The date and time + the key was last updated. + } + } + # response body for status code(s): 400, 404, 409 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def create( + self, namespace_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a Namespace Access Key. + + Creates a new access key for a serverless functions namespace. + The access key can be used to authenticate requests to the namespace's functions. + The secret key is only returned once upon creation. + + To create an access key, send a POST request to + ``/v2/functions/namespaces/{namespace_id}/keys``. + + :param namespace_id: The ID of the namespace to be managed. Required. + :type namespace_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. 
+ :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # The access key's name. Required. + "expires_in": "str" # Optional. The duration after which the access key + expires, specified as a human-readable duration string in the format ``h`` + (hours) or ``d`` (days). Minimum value is ``1h``. If omitted, the key will + never expire. + } + + # response body for status code(s): 201 + response == { + "access_key": { + "created_at": "2020-02-20 00:00:00", # Optional. The date and time + the key was created. + "expires_at": "2020-02-20 00:00:00", # Optional. When the key + expires (null for non-expiring keys). + "id": "str", # Optional. The access key's unique identifier with + prefix 'dof"" *v1*"" '. + "name": "str", # Optional. The access key's name. + "secret": "str", # Optional. The secret key used to authenticate. + This is only returned once upon creation. Make sure to copy and securely + store it. + "updated_at": "2020-02-20 00:00:00" # Optional. The date and time + the key was last updated. + } + } + # response body for status code(s): 400, 404, 409 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_functions_access_key_create_request( + namespace_id=namespace_id, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [201, 400, 404, 409]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 201: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if 
response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 400: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 409: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + async def update( + self, + namespace_id: str, + key_id: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update a Namespace Access Key. + + Updates the name of an access key for a serverless functions namespace. 
+ + To update an access key, send a PUT request to + ``/v2/functions/namespaces/{namespace_id}/keys/{key_id}``. + + :param namespace_id: The ID of the namespace to be managed. Required. + :type namespace_id: str + :param key_id: The ID of the access key to be managed. Required. + :type key_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str" # The new name for the access key. Required. + } + + # response body for status code(s): 200 + response == { + "access_key": { + "created_at": "2020-02-20 00:00:00", # Optional. The date and time + the key was created. + "expires_at": "2020-02-20 00:00:00", # Optional. When the key + expires (null for non-expiring keys). + "id": "str", # Optional. The access key's unique identifier with + prefix 'dof"" *v1*"" '. + "name": "str", # Optional. The access key's name. + "updated_at": "2020-02-20 00:00:00" # Optional. The date and time + the key was last updated. + } + } + # response body for status code(s): 400, 404, 409 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @overload + async def update( + self, + namespace_id: str, + key_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update a Namespace Access Key. + + Updates the name of an access key for a serverless functions namespace. + + To update an access key, send a PUT request to + ``/v2/functions/namespaces/{namespace_id}/keys/{key_id}``. + + :param namespace_id: The ID of the namespace to be managed. Required. + :type namespace_id: str + :param key_id: The ID of the access key to be managed. Required. + :type key_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "access_key": { + "created_at": "2020-02-20 00:00:00", # Optional. The date and time + the key was created. + "expires_at": "2020-02-20 00:00:00", # Optional. When the key + expires (null for non-expiring keys). + "id": "str", # Optional. The access key's unique identifier with + prefix 'dof"" *v1*"" '. + "name": "str", # Optional. The access key's name. + "updated_at": "2020-02-20 00:00:00" # Optional. The date and time + the key was last updated. + } + } + # response body for status code(s): 400, 404, 409 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def update( + self, + namespace_id: str, + key_id: str, + body: Union[JSON, IO[bytes]], + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update a Namespace Access Key. + + Updates the name of an access key for a serverless functions namespace. + + To update an access key, send a PUT request to + ``/v2/functions/namespaces/{namespace_id}/keys/{key_id}``. + + :param namespace_id: The ID of the namespace to be managed. Required. + :type namespace_id: str + :param key_id: The ID of the access key to be managed. Required. + :type key_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str" # The new name for the access key. Required. + } + + # response body for status code(s): 200 + response == { + "access_key": { + "created_at": "2020-02-20 00:00:00", # Optional. The date and time + the key was created. + "expires_at": "2020-02-20 00:00:00", # Optional. When the key + expires (null for non-expiring keys). + "id": "str", # Optional. The access key's unique identifier with + prefix 'dof"" *v1*"" '. + "name": "str", # Optional. The access key's name. + "updated_at": "2020-02-20 00:00:00" # Optional. The date and time + the key was last updated. + } + } + # response body for status code(s): 400, 404, 409 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. 
+ "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_functions_access_key_update_request( + namespace_id=namespace_id, + key_id=key_id, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 400, 404, 409]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + 
response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 400: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 409: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + 
@distributed_trace_async + async def delete(self, namespace_id: str, key_id: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Delete a Namespace Access Key. + + Deletes an access key for a serverless functions namespace. + + To delete an access key, send a DELETE request to + ``/v2/functions/namespaces/{namespace_id}/keys/{key_id}``. + + :param namespace_id: The ID of the namespace to be managed. Required. + :type namespace_id: str + :param key_id: The ID of the access key to be managed. Required. + :type key_id: str + :return: JSON or JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_functions_access_key_delete_request( + namespace_id=namespace_id, + key_id=key_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + 
response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + class ImagesOperations: """ .. warning:: @@ -169341,88 +178400,1004 @@ async def create( To create your container registry, send a POST request to ``/v2/registry``. - The ``name`` becomes part of the URL for images stored in the registry. For - example, if your registry is called ``example``\\ , an image in it will have the - URL ``registry.digitalocean.com/example/image:tag``. + The ``name`` becomes part of the URL for images stored in the registry. For + example, if your registry is called ``example``\\ , an image in it will have the + URL ``registry.digitalocean.com/example/image:tag``. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "registry": { + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the registry + was created. + "name": "str", # Optional. A globally unique name for the container + registry. Must be lowercase and be composed only of numbers, letters and + ``-``"" , up to a limit of 63 characters. + "region": "str", # Optional. Slug of the region where registry data + is stored. + "storage_usage_bytes": 0, # Optional. The amount of storage used in + the registry in bytes. + "storage_usage_bytes_updated_at": "2020-02-20 00:00:00", # Optional. 
+ The time at which the storage usage was updated. Storage usage is calculated + asynchronously, and may not immediately reflect pushes to the registry. + "subscription": { + "created_at": "2020-02-20 00:00:00", # Optional. The time at + which the subscription was created. + "tier": { + "allow_storage_overage": bool, # Optional. A boolean + indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB + used. + "included_bandwidth_bytes": 0, # Optional. The + amount of outbound data transfer included in the subscription tier in + bytes. + "included_repositories": 0, # Optional. The number + of repositories included in the subscription tier. ``0`` indicates + that the subscription tier includes unlimited repositories. + "included_storage_bytes": 0, # Optional. The amount + of storage included in the subscription tier in bytes. + "monthly_price_in_cents": 0, # Optional. The monthly + cost of the subscription tier in cents. + "name": "str", # Optional. The name of the + subscription tier. + "slug": "str", # Optional. The slug identifier of + the subscription tier. + "storage_overage_price_in_cents": 0 # Optional. The + price paid in cents per GiB for additional storage beyond what is + included in the subscription plan. + }, + "updated_at": "2020-02-20 00:00:00" # Optional. The time at + which the subscription was last updated. + } + } + } + """ + + @distributed_trace_async + async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Create Container Registry. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + To create your container registry, send a POST request to ``/v2/registry``. + + The ``name`` becomes part of the URL for images stored in the registry. 
For + example, if your registry is called ``example``\\ , an image in it will have the + URL ``registry.digitalocean.com/example/image:tag``. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # A globally unique name for the container registry. Must be + lowercase and be composed only of numbers, letters and ``-``"" , up to a limit of + 63 characters. Required. + "subscription_tier_slug": "str", # The slug of the subscription tier to sign + up for. Valid values can be retrieved using the options endpoint. Required. Known + values are: "starter", "basic", and "professional". + "region": "str" # Optional. Slug of the region where registry data is + stored. When not provided, a region will be selected. Known values are: "nyc3", + "sfo3", "ams3", "sgp1", and "fra1". + } + + # response body for status code(s): 201 + response == { + "registry": { + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the registry + was created. + "name": "str", # Optional. A globally unique name for the container + registry. Must be lowercase and be composed only of numbers, letters and + ``-``"" , up to a limit of 63 characters. + "region": "str", # Optional. Slug of the region where registry data + is stored. + "storage_usage_bytes": 0, # Optional. The amount of storage used in + the registry in bytes. + "storage_usage_bytes_updated_at": "2020-02-20 00:00:00", # Optional. + The time at which the storage usage was updated. Storage usage is calculated + asynchronously, and may not immediately reflect pushes to the registry. + "subscription": { + "created_at": "2020-02-20 00:00:00", # Optional. 
The time at + which the subscription was created. + "tier": { + "allow_storage_overage": bool, # Optional. A boolean + indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB + used. + "included_bandwidth_bytes": 0, # Optional. The + amount of outbound data transfer included in the subscription tier in + bytes. + "included_repositories": 0, # Optional. The number + of repositories included in the subscription tier. ``0`` indicates + that the subscription tier includes unlimited repositories. + "included_storage_bytes": 0, # Optional. The amount + of storage included in the subscription tier in bytes. + "monthly_price_in_cents": 0, # Optional. The monthly + cost of the subscription tier in cents. + "name": "str", # Optional. The name of the + subscription tier. + "slug": "str", # Optional. The slug identifier of + the subscription tier. + "storage_overage_price_in_cents": 0 # Optional. The + price paid in cents per GiB for additional storage beyond what is + included in the subscription plan. + }, + "updated_at": "2020-02-20 00:00:00" # Optional. The time at + which the subscription was last updated. 
+ } + } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_registry_create_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + 
return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def delete(self, **kwargs: Any) -> Optional[JSON]: + # pylint: disable=line-too-long + """Delete Container Registry. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + To delete your container registry, destroying all container image + data stored in it, send a DELETE request to ``/v2/registry``. + + This operation is not compatible with multiple registries in a DO account. You should use + ``/v2/registries/{registry_name}`` instead. + + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404, 412 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_registry_delete_request( + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404, 412]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if 
response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 412: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_subscription(self, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Get Subscription. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + A subscription is automatically created when you configure your + container registry. To get information about your subscription, send a GET + request to ``/v2/registry/subscription``. + + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "subscription": { + "created_at": "2020-02-20 00:00:00", # Optional. The time at which + the subscription was created. + "tier": { + "allow_storage_overage": bool, # Optional. A boolean + indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB + used. + "included_bandwidth_bytes": 0, # Optional. The amount of + outbound data transfer included in the subscription tier in bytes. + "included_repositories": 0, # Optional. The number of + repositories included in the subscription tier. ``0`` indicates that the + subscription tier includes unlimited repositories. + "included_storage_bytes": 0, # Optional. 
The amount of + storage included in the subscription tier in bytes. + "monthly_price_in_cents": 0, # Optional. The monthly cost of + the subscription tier in cents. + "name": "str", # Optional. The name of the subscription + tier. + "slug": "str", # Optional. The slug identifier of the + subscription tier. + "storage_overage_price_in_cents": 0 # Optional. The price + paid in cents per GiB for additional storage beyond what is included in + the subscription plan. + }, + "updated_at": "2020-02-20 00:00:00" # Optional. The time at which + the subscription was last updated. + } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_registry_get_subscription_request( + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", 
response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + async def update_subscription( + self, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update Subscription Tier. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + After creating your registry, you can switch to a different + subscription tier to better suit your needs. To do this, send a POST request + to ``/v2/registry/subscription``. + + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "tier_slug": "str" # Optional. The slug of the subscription tier to sign up + for. Known values are: "starter", "basic", and "professional". + } + + # response body for status code(s): 200 + response == { + "subscription": { + "created_at": "2020-02-20 00:00:00", # Optional. The time at which + the subscription was created. + "tier": { + "allow_storage_overage": bool, # Optional. A boolean + indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB + used. + "included_bandwidth_bytes": 0, # Optional. 
The amount of + outbound data transfer included in the subscription tier in bytes. + "included_repositories": 0, # Optional. The number of + repositories included in the subscription tier. ``0`` indicates that the + subscription tier includes unlimited repositories. + "included_storage_bytes": 0, # Optional. The amount of + storage included in the subscription tier in bytes. + "monthly_price_in_cents": 0, # Optional. The monthly cost of + the subscription tier in cents. + "name": "str", # Optional. The name of the subscription + tier. + "slug": "str", # Optional. The slug identifier of the + subscription tier. + "storage_overage_price_in_cents": 0 # Optional. The price + paid in cents per GiB for additional storage beyond what is included in + the subscription plan. + }, + "updated_at": "2020-02-20 00:00:00" # Optional. The time at which + the subscription was last updated. + } + } + # response body for status code(s): 412 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def update_subscription( + self, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update Subscription Tier. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + After creating your registry, you can switch to a different + subscription tier to better suit your needs. To do this, send a POST request + to ``/v2/registry/subscription``. 
+ + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "subscription": { + "created_at": "2020-02-20 00:00:00", # Optional. The time at which + the subscription was created. + "tier": { + "allow_storage_overage": bool, # Optional. A boolean + indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB + used. + "included_bandwidth_bytes": 0, # Optional. The amount of + outbound data transfer included in the subscription tier in bytes. + "included_repositories": 0, # Optional. The number of + repositories included in the subscription tier. ``0`` indicates that the + subscription tier includes unlimited repositories. + "included_storage_bytes": 0, # Optional. The amount of + storage included in the subscription tier in bytes. + "monthly_price_in_cents": 0, # Optional. The monthly cost of + the subscription tier in cents. + "name": "str", # Optional. The name of the subscription + tier. + "slug": "str", # Optional. The slug identifier of the + subscription tier. + "storage_overage_price_in_cents": 0 # Optional. The price + paid in cents per GiB for additional storage beyond what is included in + the subscription plan. + }, + "updated_at": "2020-02-20 00:00:00" # Optional. The time at which + the subscription was last updated. + } + } + # response body for status code(s): 412 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. 
+ "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def update_subscription( + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update Subscription Tier. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + After creating your registry, you can switch to a different + subscription tier to better suit your needs. To do this, send a POST request + to ``/v2/registry/subscription``. + + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "tier_slug": "str" # Optional. The slug of the subscription tier to sign up + for. Known values are: "starter", "basic", and "professional". + } + + # response body for status code(s): 200 + response == { + "subscription": { + "created_at": "2020-02-20 00:00:00", # Optional. The time at which + the subscription was created. + "tier": { + "allow_storage_overage": bool, # Optional. A boolean + indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB + used. + "included_bandwidth_bytes": 0, # Optional. The amount of + outbound data transfer included in the subscription tier in bytes. + "included_repositories": 0, # Optional. The number of + repositories included in the subscription tier. 
``0`` indicates that the + subscription tier includes unlimited repositories. + "included_storage_bytes": 0, # Optional. The amount of + storage included in the subscription tier in bytes. + "monthly_price_in_cents": 0, # Optional. The monthly cost of + the subscription tier in cents. + "name": "str", # Optional. The name of the subscription + tier. + "slug": "str", # Optional. The slug identifier of the + subscription tier. + "storage_overage_price_in_cents": 0 # Optional. The price + paid in cents per GiB for additional storage beyond what is included in + the subscription plan. + }, + "updated_at": "2020-02-20 00:00:00" # Optional. The time at which + the subscription was last updated. + } + } + # response body for status code(s): 412 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_registry_update_subscription_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 412]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if 
response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 412: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def get_docker_credentials( + self, *, expiry_seconds: int = 0, read_write: bool = False, **kwargs: Any + ) -> JSON: + """Get Docker Credentials for Container Registry. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + In order to access your container registry with the Docker client or from a + Kubernetes cluster, you will need to configure authentication. The necessary + JSON configuration can be retrieved by sending a GET request to + ``/v2/registry/docker-credentials``. + + The response will be in the format of a Docker ``config.json`` file. To use the + config in your Kubernetes cluster, create a Secret with: + + .. code-block:: + + kubectl create secret generic docr \\ + --from-file=.dockerconfigjson=config.json \\ + --type=kubernetes.io/dockerconfigjson + + + By default, the returned credentials have read-only access to your registry + and cannot be used to push images. This is appropriate for most Kubernetes + clusters. To retrieve read/write credentials, suitable for use with the Docker + client or in a CI system, read_write may be provided as query parameter. For + example: ``/v2/registry/docker-credentials?read_write=true`` + + By default, the returned credentials will not expire. 
To retrieve credentials + with an expiry set, expiry_seconds may be provided as a query parameter. For + example: ``/v2/registry/docker-credentials?expiry_seconds=3600`` will return + credentials that expire after one hour. + + :keyword expiry_seconds: The duration in seconds that the returned registry credentials will be + valid. If not set or 0, the credentials will not expire. Default value is 0. + :paramtype expiry_seconds: int + :keyword read_write: By default, the registry credentials allow for read-only access. Set this + query parameter to ``true`` to obtain read-write credentials. Default value is False. + :paramtype read_write: bool + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "auths": { + "registry.digitalocean.com": { + "auth": "str" # Optional. A base64 encoded string containing + credentials for the container registry. + } + } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_registry_get_docker_credentials_request( + expiry_seconds=expiry_seconds, + read_write=read_write, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in 
[200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + async def validate_name( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Validate a Container Registry Name. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + To validate that a container registry name is available for use, send a POST + request to ``/v2/registry/validate-name``. + + If the name is both formatted correctly and available, the response code will + be 204 and contain no body. If the name is already in use, the response will + be a 409 Conflict. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str" # A globally unique name for the container registry. 
Must be + lowercase and be composed only of numbers, letters and ``-``"" , up to a limit of + 63 characters. Required. + } + + # response body for status code(s): 409 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def validate_name( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Validate a Container Registry Name. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + To validate that a container registry name is available for use, send a POST + request to ``/v2/registry/validate-name``. + + If the name is both formatted correctly and available, the response code will + be 204 and contain no body. If the name is already in use, the response will + be a 409 Conflict. :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 409 response == { - "registry": { - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the registry - was created. 
- "name": "str", # Optional. A globally unique name for the container - registry. Must be lowercase and be composed only of numbers, letters and - ``-``"" , up to a limit of 63 characters. - "region": "str", # Optional. Slug of the region where registry data - is stored. - "storage_usage_bytes": 0, # Optional. The amount of storage used in - the registry in bytes. - "storage_usage_bytes_updated_at": "2020-02-20 00:00:00", # Optional. - The time at which the storage usage was updated. Storage usage is calculated - asynchronously, and may not immediately reflect pushes to the registry. - "subscription": { - "created_at": "2020-02-20 00:00:00", # Optional. The time at - which the subscription was created. - "tier": { - "allow_storage_overage": bool, # Optional. A boolean - indicating whether the subscription tier supports additional storage - above what is included in the base plan at an additional cost per GiB - used. - "included_bandwidth_bytes": 0, # Optional. The - amount of outbound data transfer included in the subscription tier in - bytes. - "included_repositories": 0, # Optional. The number - of repositories included in the subscription tier. ``0`` indicates - that the subscription tier includes unlimited repositories. - "included_storage_bytes": 0, # Optional. The amount - of storage included in the subscription tier in bytes. - "monthly_price_in_cents": 0, # Optional. The monthly - cost of the subscription tier in cents. - "name": "str", # Optional. The name of the - subscription tier. - "slug": "str", # Optional. The slug identifier of - the subscription tier. - "storage_overage_price_in_cents": 0 # Optional. The - price paid in cents per GiB for additional storage beyond what is - included in the subscription plan. - }, - "updated_at": "2020-02-20 00:00:00" # Optional. The time at - which the subscription was last updated. - } - } + "id": "str", # A short identifier corresponding to the HTTP status code + returned. 
For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. } """ @distributed_trace_async - async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: + async def validate_name( + self, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Create Container Registry. + """Validate a Container Registry Name. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To create your container registry, send a POST request to ``/v2/registry``. + To validate that a container registry name is available for use, send a POST + request to ``/v2/registry/validate-name``. - The ``name`` becomes part of the URL for images stored in the registry. For - example, if your registry is called ``example``\\ , an image in it will have the - URL ``registry.digitalocean.com/example/image:tag``. + If the name is both formatted correctly and available, the response code will + be 204 and contain no body. If the name is already in use, the response will + be a 409 Conflict. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -169430,63 +179405,21 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # JSON input template you can fill out and use as your body input. body = { - "name": "str", # A globally unique name for the container registry. 
Must be + "name": "str" # A globally unique name for the container registry. Must be lowercase and be composed only of numbers, letters and ``-``"" , up to a limit of 63 characters. Required. - "subscription_tier_slug": "str", # The slug of the subscription tier to sign - up for. Valid values can be retrieved using the options endpoint. Required. Known - values are: "starter", "basic", and "professional". - "region": "str" # Optional. Slug of the region where registry data is - stored. When not provided, a region will be selected. Known values are: "nyc3", - "sfo3", "ams3", "sgp1", and "fra1". } - # response body for status code(s): 201 + # response body for status code(s): 409 response == { - "registry": { - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the registry - was created. - "name": "str", # Optional. A globally unique name for the container - registry. Must be lowercase and be composed only of numbers, letters and - ``-``"" , up to a limit of 63 characters. - "region": "str", # Optional. Slug of the region where registry data - is stored. - "storage_usage_bytes": 0, # Optional. The amount of storage used in - the registry in bytes. - "storage_usage_bytes_updated_at": "2020-02-20 00:00:00", # Optional. - The time at which the storage usage was updated. Storage usage is calculated - asynchronously, and may not immediately reflect pushes to the registry. - "subscription": { - "created_at": "2020-02-20 00:00:00", # Optional. The time at - which the subscription was created. - "tier": { - "allow_storage_overage": bool, # Optional. A boolean - indicating whether the subscription tier supports additional storage - above what is included in the base plan at an additional cost per GiB - used. - "included_bandwidth_bytes": 0, # Optional. The - amount of outbound data transfer included in the subscription tier in - bytes. - "included_repositories": 0, # Optional. 
The number - of repositories included in the subscription tier. ``0`` indicates - that the subscription tier includes unlimited repositories. - "included_storage_bytes": 0, # Optional. The amount - of storage included in the subscription tier in bytes. - "monthly_price_in_cents": 0, # Optional. The monthly - cost of the subscription tier in cents. - "name": "str", # Optional. The name of the - subscription tier. - "slug": "str", # Optional. The slug identifier of - the subscription tier. - "storage_overage_price_in_cents": 0 # Optional. The - price paid in cents per GiB for additional storage beyond what is - included in the subscription plan. - }, - "updated_at": "2020-02-20 00:00:00" # Optional. The time at - which the subscription was last updated. - } - } + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
} """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -169508,7 +179441,7 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -169518,7 +179451,7 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: else: _json = body - _request = build_registry_create_request( + _request = build_registry_validate_name_request( content_type=content_type, json=_json, content=_content, @@ -169536,54 +179469,109 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [201]: + if response.status_code not in [204, 409]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - 
deserialized = None + if response.status_code == 409: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @distributed_trace_async - async def delete(self, **kwargs: Any) -> Optional[JSON]: + async def list_repositories( + self, registry_name: str, *, per_page: int = 20, page: int = 1, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Delete Container Registry. + """List All Container Registry Repositories. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To delete your container registry, destroying all container image - data stored in it, send a DELETE request to ``/v2/registry``. + This endpoint has been deprecated in favor of the *List All Container Registry Repositories + [V2]* endpoint. - This operation is not compatible with multiple registries in a DO account. You should use - ``/v2/registries/{registry_name}`` instead. + To list all repositories in your container registry, send a GET + request to ``/v2/registry/$REGISTRY_NAME/repositories``. - :return: JSON object or None - :rtype: JSON or None + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. 
Default value is 1. + :paramtype page: int + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 404, 412 + # response body for status code(s): 200 + response == { + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "links": { + "pages": {} + }, + "repositories": [ + { + "latest_tag": { + "compressed_size_bytes": 0, # Optional. The + compressed size of the tag in bytes. + "manifest_digest": "str", # Optional. The digest of + the manifest associated with the tag. + "registry_name": "str", # Optional. The name of the + container registry. + "repository": "str", # Optional. The name of the + repository. + "size_bytes": 0, # Optional. The uncompressed size + of the tag in bytes (this size is calculated asynchronously so it may + not be immediately available). + "tag": "str", # Optional. The name of the tag. + "updated_at": "2020-02-20 00:00:00" # Optional. The + time the tag was last updated. + }, + "name": "str", # Optional. The name of the repository. + "registry_name": "str", # Optional. The name of the + container registry. + "tag_count": 0 # Optional. The number of tags in the + repository. + } + ] + } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -169611,9 +179599,12 @@ async def delete(self, **kwargs: Any) -> Optional[JSON]: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_delete_request( + _request = build_registry_list_repositories_request( + registry_name=registry_name, + per_page=per_page, + page=page, headers=_headers, params=_params, ) @@ -169628,26 +179619,14 @@ async def delete(self, **kwargs: Any) -> Optional[JSON]: response = pipeline_response.http_response - if response.status_code not in [204, 404, 412]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -169663,7 +179642,7 @@ async def delete(self, **kwargs: Any) -> Optional[JSON]: else: deserialized = None - if response.status_code == 412: + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -169680,21 +179659,38 @@ async def delete(self, **kwargs: Any) -> Optional[JSON]: deserialized = None if cls: - return 
cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_subscription(self, **kwargs: Any) -> JSON: + async def list_repositories_v2( + self, + registry_name: str, + *, + per_page: int = 20, + page: int = 1, + page_token: Optional[str] = None, + **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Get Subscription. + """List All Container Registry Repositories (V2). **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - A subscription is automatically created when you configure your - container registry. To get information about your subscription, send a GET - request to ``/v2/registry/subscription``. + To list all repositories in your container registry, send a GET + request to ``/v2/registry/$REGISTRY_NAME/repositoriesV2``. + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Ignored when 'page_token' is + provided. Default value is 1. + :paramtype page: int + :keyword page_token: Token to retrieve of the next or previous set of results more quickly than + using 'page'. Default value is None. + :paramtype page_token: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -169704,34 +179700,60 @@ async def get_subscription(self, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "subscription": { - "created_at": "2020-02-20 00:00:00", # Optional. The time at which - the subscription was created. - "tier": { - "allow_storage_overage": bool, # Optional. 
A boolean - indicating whether the subscription tier supports additional storage - above what is included in the base plan at an additional cost per GiB - used. - "included_bandwidth_bytes": 0, # Optional. The amount of - outbound data transfer included in the subscription tier in bytes. - "included_repositories": 0, # Optional. The number of - repositories included in the subscription tier. ``0`` indicates that the - subscription tier includes unlimited repositories. - "included_storage_bytes": 0, # Optional. The amount of - storage included in the subscription tier in bytes. - "monthly_price_in_cents": 0, # Optional. The monthly cost of - the subscription tier in cents. - "name": "str", # Optional. The name of the subscription - tier. - "slug": "str", # Optional. The slug identifier of the - subscription tier. - "storage_overage_price_in_cents": 0 # Optional. The price - paid in cents per GiB for additional storage beyond what is included in - the subscription plan. - }, - "updated_at": "2020-02-20 00:00:00" # Optional. The time at which - the subscription was last updated. - } + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "links": { + "pages": {} + }, + "repositories": [ + { + "latest_manifest": { + "blobs": [ + { + "compressed_size_bytes": 0, # + Optional. The compressed size of the blob in bytes. + "digest": "str" # Optional. The + digest of the blob. + } + ], + "compressed_size_bytes": 0, # Optional. The + compressed size of the manifest in bytes. + "digest": "str", # Optional. The manifest digest. + "registry_name": "str", # Optional. The name of the + container registry. + "repository": "str", # Optional. The name of the + repository. + "size_bytes": 0, # Optional. The uncompressed size + of the manifest in bytes (this size is calculated asynchronously so + it may not be immediately available). + "tags": [ + "str" # Optional. All tags associated with + this manifest. 
+ ], + "updated_at": "2020-02-20 00:00:00" # Optional. The + time the manifest was last updated. + }, + "manifest_count": 0, # Optional. The number of manifests in + the repository. + "name": "str", # Optional. The name of the repository. + "registry_name": "str", # Optional. The name of the + container registry. + "tag_count": 0 # Optional. The number of tags in the + repository. + } + ] + } + # response body for status code(s): 400, 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -169752,7 +179774,11 @@ async def get_subscription(self, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_get_subscription_request( + _request = build_registry_list_repositories_v2_request( + registry_name=registry_name, + per_page=per_page, + page=page, + page_token=page_token, headers=_headers, params=_params, ) @@ -169767,55 +179793,98 @@ async def get_subscription(self, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [200, 400, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - 
response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 400: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore return cast(JSON, deserialized) # type: ignore - @overload - async def update_subscription( + @distributed_trace_async + async def list_repository_tags( self, - body: Optional[JSON] = 
None, + registry_name: str, + repository_name: str, *, - content_type: str = "application/json", + per_page: int = 20, + page: int = 1, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update Subscription Tier. + """List All Container Registry Repository Tags. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - After creating your registry, you can switch to a different - subscription tier to better suit your needs. To do this, send a POST request - to ``/v2/registry/subscription``. + To list all tags in your container registry repository, send a GET + request to ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/tags``. - :param body: Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str + Note that if your repository name contains ``/`` characters, it must be + URL-encoded in the request URL. For example, to list tags for + ``registry.digitalocean.com/example/my/repo``\\ , the path would be + ``/v2/registry/example/repositories/my%2Frepo/tags``. + + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param repository_name: The name of a container registry repository. If the name contains ``/`` + characters, they must be URL-encoded, e.g. ``%2F``. Required. + :type repository_name: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -169823,44 +179892,33 @@ async def update_subscription( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "tier_slug": "str" # Optional. 
The slug of the subscription tier to sign up - for. Known values are: "starter", "basic", and "professional". - } - # response body for status code(s): 200 response == { - "subscription": { - "created_at": "2020-02-20 00:00:00", # Optional. The time at which - the subscription was created. - "tier": { - "allow_storage_overage": bool, # Optional. A boolean - indicating whether the subscription tier supports additional storage - above what is included in the base plan at an additional cost per GiB - used. - "included_bandwidth_bytes": 0, # Optional. The amount of - outbound data transfer included in the subscription tier in bytes. - "included_repositories": 0, # Optional. The number of - repositories included in the subscription tier. ``0`` indicates that the - subscription tier includes unlimited repositories. - "included_storage_bytes": 0, # Optional. The amount of - storage included in the subscription tier in bytes. - "monthly_price_in_cents": 0, # Optional. The monthly cost of - the subscription tier in cents. - "name": "str", # Optional. The name of the subscription - tier. - "slug": "str", # Optional. The slug identifier of the - subscription tier. - "storage_overage_price_in_cents": 0 # Optional. The price - paid in cents per GiB for additional storage beyond what is included in - the subscription plan. - }, - "updated_at": "2020-02-20 00:00:00" # Optional. The time at which - the subscription was last updated. - } + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "links": { + "pages": {} + }, + "tags": [ + { + "compressed_size_bytes": 0, # Optional. The compressed size + of the tag in bytes. + "manifest_digest": "str", # Optional. The digest of the + manifest associated with the tag. + "registry_name": "str", # Optional. The name of the + container registry. + "repository": "str", # Optional. The name of the repository. + "size_bytes": 0, # Optional. 
The uncompressed size of the + tag in bytes (this size is calculated asynchronously so it may not be + immediately available). + "tag": "str", # Optional. The name of the tag. + "updated_at": "2020-02-20 00:00:00" # Optional. The time the + tag was last updated. + } + ] } - # response body for status code(s): 412 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -169872,68 +179930,126 @@ async def update_subscription( tickets to help identify the issue. } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @overload - async def update_subscription( + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_registry_list_repository_tags_request( + registry_name=registry_name, + repository_name=repository_name, + per_page=per_page, + page=page, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: 
+ response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def delete_repository_tag( self, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", + registry_name: str, + repository_name: str, + repository_tag: str, **kwargs: Any - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Update Subscription Tier. + """Delete Container Registry Repository Tag. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - After creating your registry, you can switch to a different - subscription tier to better suit your needs. To do this, send a POST request - to ``/v2/registry/subscription``. + To delete a container repository tag, send a DELETE request to + ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/tags/$TAG``. - :param body: Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. 
- Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON + Note that if your repository name contains ``/`` characters, it must be + URL-encoded in the request URL. For example, to delete + ``registry.digitalocean.com/example/my/repo:mytag``\\ , the path would be + ``/v2/registry/example/repositories/my%2Frepo/tags/mytag``. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. + + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param repository_name: The name of a container registry repository. If the name contains ``/`` + characters, they must be URL-encoded, e.g. ``%2F``. Required. + :type repository_name: str + :param repository_tag: The name of a container registry repository tag. Required. + :type repository_tag: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "subscription": { - "created_at": "2020-02-20 00:00:00", # Optional. The time at which - the subscription was created. - "tier": { - "allow_storage_overage": bool, # Optional. A boolean - indicating whether the subscription tier supports additional storage - above what is included in the base plan at an additional cost per GiB - used. - "included_bandwidth_bytes": 0, # Optional. The amount of - outbound data transfer included in the subscription tier in bytes. - "included_repositories": 0, # Optional. The number of - repositories included in the subscription tier. ``0`` indicates that the - subscription tier includes unlimited repositories. - "included_storage_bytes": 0, # Optional. The amount of - storage included in the subscription tier in bytes. - "monthly_price_in_cents": 0, # Optional. The monthly cost of - the subscription tier in cents. 
- "name": "str", # Optional. The name of the subscription - tier. - "slug": "str", # Optional. The slug identifier of the - subscription tier. - "storage_overage_price_in_cents": 0 # Optional. The price - paid in cents per GiB for additional storage beyond what is included in - the subscription plan. - }, - "updated_at": "2020-02-20 00:00:00" # Optional. The time at which - the subscription was last updated. - } - } - # response body for status code(s): 412 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -169945,22 +180061,114 @@ async def update_subscription( tickets to help identify the issue. } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_registry_delete_repository_tag_request( + registry_name=registry_name, + repository_name=repository_name, + repository_tag=repository_tag, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # 
type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore @distributed_trace_async - async def update_subscription( - self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + async def list_repository_manifests( + self, + registry_name: str, + repository_name: str, + *, + per_page: int = 20, + page: int = 1, + **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update Subscription Tier. + """List All Container Registry Repository Manifests. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - After creating your registry, you can switch to a different - subscription tier to better suit your needs. To do this, send a POST request - to ``/v2/registry/subscription``. + To list all manifests in your container registry repository, send a GET + request to ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/digests``. - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. 
- :type body: JSON or IO[bytes] + Note that if your repository name contains ``/`` characters, it must be + URL-encoded in the request URL. For example, to list manifests for + ``registry.digitalocean.com/example/my/repo``\\ , the path would be + ``/v2/registry/example/repositories/my%2Frepo/digests``. + + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param repository_name: The name of a container registry repository. If the name contains ``/`` + characters, they must be URL-encoded, e.g. ``%2F``. Required. + :type repository_name: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -169968,44 +180176,43 @@ async def update_subscription( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "tier_slug": "str" # Optional. The slug of the subscription tier to sign up - for. Known values are: "starter", "basic", and "professional". - } - # response body for status code(s): 200 response == { - "subscription": { - "created_at": "2020-02-20 00:00:00", # Optional. The time at which - the subscription was created. - "tier": { - "allow_storage_overage": bool, # Optional. A boolean - indicating whether the subscription tier supports additional storage - above what is included in the base plan at an additional cost per GiB - used. - "included_bandwidth_bytes": 0, # Optional. The amount of - outbound data transfer included in the subscription tier in bytes. - "included_repositories": 0, # Optional. The number of - repositories included in the subscription tier. ``0`` indicates that the - subscription tier includes unlimited repositories. - "included_storage_bytes": 0, # Optional. 
The amount of - storage included in the subscription tier in bytes. - "monthly_price_in_cents": 0, # Optional. The monthly cost of - the subscription tier in cents. - "name": "str", # Optional. The name of the subscription - tier. - "slug": "str", # Optional. The slug identifier of the - subscription tier. - "storage_overage_price_in_cents": 0 # Optional. The price - paid in cents per GiB for additional storage beyond what is included in - the subscription plan. - }, - "updated_at": "2020-02-20 00:00:00" # Optional. The time at which - the subscription was last updated. - } + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "links": { + "pages": {} + }, + "manifests": [ + { + "blobs": [ + { + "compressed_size_bytes": 0, # Optional. The + compressed size of the blob in bytes. + "digest": "str" # Optional. The digest of + the blob. + } + ], + "compressed_size_bytes": 0, # Optional. The compressed size + of the manifest in bytes. + "digest": "str", # Optional. The manifest digest. + "registry_name": "str", # Optional. The name of the + container registry. + "repository": "str", # Optional. The name of the repository. + "size_bytes": 0, # Optional. The uncompressed size of the + manifest in bytes (this size is calculated asynchronously so it may not + be immediately available). + "tags": [ + "str" # Optional. All tags associated with this + manifest. + ], + "updated_at": "2020-02-20 00:00:00" # Optional. The time the + manifest was last updated. + } + ] } - # response body for status code(s): 412 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -170030,29 +180237,16 @@ async def update_subscription( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[JSON] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None - - _request = build_registry_update_subscription_request( - content_type=content_type, - json=_json, - content=_content, + _request = build_registry_list_repository_manifests_request( + registry_name=registry_name, + repository_name=repository_name, + per_page=per_page, + page=page, headers=_headers, params=_params, ) @@ -170067,7 +180261,7 @@ async def update_subscription( response = pipeline_response.http_response - if response.status_code not in [200, 412]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -170090,7 +180284,7 @@ async def update_subscription( else: deserialized = None - if response.status_code == 412: + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -170112,60 +180306,53 @@ async def update_subscription( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_docker_credentials( - self, *, expiry_seconds: int = 0, read_write: bool = False, **kwargs: Any - ) -> JSON: - """Get Docker Credentials for Container Registry. 
+ async def delete_repository_manifest( + self, + registry_name: str, + repository_name: str, + manifest_digest: str, + **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Delete Container Registry Repository Manifest. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - In order to access your container registry with the Docker client or from a - Kubernetes cluster, you will need to configure authentication. The necessary - JSON configuration can be retrieved by sending a GET request to - ``/v2/registry/docker-credentials``. - - The response will be in the format of a Docker ``config.json`` file. To use the - config in your Kubernetes cluster, create a Secret with: - - .. code-block:: - - kubectl create secret generic docr \\ - --from-file=.dockerconfigjson=config.json \\ - --type=kubernetes.io/dockerconfigjson - + To delete a container repository manifest by digest, send a DELETE request to + ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/digests/$MANIFEST_DIGEST``. - By default, the returned credentials have read-only access to your registry - and cannot be used to push images. This is appropriate for most Kubernetes - clusters. To retrieve read/write credentials, suitable for use with the Docker - client or in a CI system, read_write may be provided as query parameter. For - example: ``/v2/registry/docker-credentials?read_write=true`` + Note that if your repository name contains ``/`` characters, it must be + URL-encoded in the request URL. For example, to delete + ``registry.digitalocean.com/example/my/repo@sha256:abcd``\\ , the path would be + ``/v2/registry/example/repositories/my%2Frepo/digests/sha256:abcd``. - By default, the returned credentials will not expire. To retrieve credentials - with an expiry set, expiry_seconds may be provided as a query parameter. 
For - example: ``/v2/registry/docker-credentials?expiry_seconds=3600`` will return - credentials that expire after one hour. + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. - :keyword expiry_seconds: The duration in seconds that the returned registry credentials will be - valid. If not set or 0, the credentials will not expire. Default value is 0. - :paramtype expiry_seconds: int - :keyword read_write: By default, the registry credentials allow for read-only access. Set this - query parameter to ``true`` to obtain read-write credentials. Default value is False. - :paramtype read_write: bool - :return: JSON object - :rtype: JSON + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param repository_name: The name of a container registry repository. If the name contains ``/`` + characters, they must be URL-encoded, e.g. ``%2F``. Required. + :type repository_name: str + :param manifest_digest: The manifest digest of a container registry repository tag. Required. + :type manifest_digest: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 + # response body for status code(s): 404 response == { - "auths": { - "registry.digitalocean.com": { - "auth": "str" # Optional. A base64 encoded string containing - credentials for the container registry. - } - } + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -170184,11 +180371,12 @@ async def get_docker_credentials( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_registry_get_docker_credentials_request( - expiry_seconds=expiry_seconds, - read_write=read_write, + _request = build_registry_delete_repository_manifest_request( + registry_name=registry_name, + repository_name=repository_name, + manifest_digest=manifest_digest, headers=_headers, params=_params, ) @@ -170203,56 +180391,93 @@ async def get_docker_credentials( response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [204, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - 
deserialized = response.json() - else: - deserialized = None + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @overload - async def validate_name( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: + async def run_garbage_collection( + self, + registry_name: str, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Validate a Container Registry Name. + """Start Garbage Collection. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To validate that a container registry name is available for use, send a POST - request to ``/v2/registry/validate-name``. + Garbage collection enables users to clear out unreferenced blobs (layer & + manifest data) after deleting one or more manifests from a repository. If + there are no unreferenced blobs resulting from the deletion of one or more + manifests, garbage collection is effectively a noop. + `See here for more information + `_ + about how and why you should clean up your container registry periodically. - If the name is both formatted correctly and available, the response code will - be 204 and contain no body. If the name is already in use, the response will - be a 409 Conflict. 
+ To request a garbage collection run on your registry, send a POST request to + ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the + following sequence of events on your registry. - :param body: Required. + + * Set the registry to read-only mode, meaning no further write-scoped + JWTs will be issued to registry clients. Existing write-scoped JWTs will + continue to work until they expire which can take up to 15 minutes. + * Wait until all existing write-scoped JWTs have expired. + * Scan all registry manifests to determine which blobs are unreferenced. + * Delete all unreferenced blobs from the registry. + * Record the number of blobs deleted and bytes freed, mark the garbage + collection status as ``success``. + * Remove the read-only mode restriction from the registry, meaning write-scoped + JWTs will once again be issued to registry clients. + + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param body: Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -170260,12 +180485,33 @@ async def validate_name( # JSON input template you can fill out and use as your body input. body = { - "name": "str" # A globally unique name for the container registry. Must be - lowercase and be composed only of numbers, letters and ``-``"" , up to a limit of - 63 characters. Required. + "type": "str" # Optional. Type of the garbage collection to run against this + registry. Known values are: "untagged manifests only", "unreferenced blobs only", + and "untagged manifests and unreferenced blobs". 
} - # response body for status code(s): 409 + # response body for status code(s): 201 + response == { + "garbage_collection": { + "blobs_deleted": 0, # Optional. The number of blobs deleted as a + result of this garbage collection. + "created_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was created. + "freed_bytes": 0, # Optional. The number of bytes freed as a result + of this garbage collection. + "registry_name": "str", # Optional. The name of the container + registry. + "status": "str", # Optional. The current status of this garbage + collection. Known values are: "requested", "waiting for write JWTs to + expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", + "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of the + garbage collection. + } + } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -170279,34 +180525,79 @@ async def validate_name( """ @overload - async def validate_name( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: + async def run_garbage_collection( + self, + registry_name: str, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Validate a Container Registry Name. + """Start Garbage Collection. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To validate that a container registry name is available for use, send a POST - request to ``/v2/registry/validate-name``. 
+ Garbage collection enables users to clear out unreferenced blobs (layer & + manifest data) after deleting one or more manifests from a repository. If + there are no unreferenced blobs resulting from the deletion of one or more + manifests, garbage collection is effectively a noop. + `See here for more information + `_ + about how and why you should clean up your container registry periodically. - If the name is both formatted correctly and available, the response code will - be 204 and contain no body. If the name is already in use, the response will - be a 409 Conflict. + To request a garbage collection run on your registry, send a POST request to + ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the + following sequence of events on your registry. - :param body: Required. + + * Set the registry to read-only mode, meaning no further write-scoped + JWTs will be issued to registry clients. Existing write-scoped JWTs will + continue to work until they expire which can take up to 15 minutes. + * Wait until all existing write-scoped JWTs have expired. + * Scan all registry manifests to determine which blobs are unreferenced. + * Delete all unreferenced blobs from the registry. + * Record the number of blobs deleted and bytes freed, mark the garbage + collection status as ``success``. + * Remove the read-only mode restriction from the registry, meaning write-scoped + JWTs will once again be issued to registry clients. + + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param body: Default value is None. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. 
code-block:: python - # response body for status code(s): 409 + # response body for status code(s): 201 + response == { + "garbage_collection": { + "blobs_deleted": 0, # Optional. The number of blobs deleted as a + result of this garbage collection. + "created_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was created. + "freed_bytes": 0, # Optional. The number of bytes freed as a result + of this garbage collection. + "registry_name": "str", # Optional. The name of the container + registry. + "status": "str", # Optional. The current status of this garbage + collection. Known values are: "requested", "waiting for write JWTs to + expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", + "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of the + garbage collection. + } + } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -170320,25 +180611,47 @@ async def validate_name( """ @distributed_trace_async - async def validate_name( - self, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: + async def run_garbage_collection( + self, + registry_name: str, + body: Optional[Union[JSON, IO[bytes]]] = None, + **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Validate a Container Registry Name. + """Start Garbage Collection. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To validate that a container registry name is available for use, send a POST - request to ``/v2/registry/validate-name``. + Garbage collection enables users to clear out unreferenced blobs (layer & + manifest data) after deleting one or more manifests from a repository. 
If + there are no unreferenced blobs resulting from the deletion of one or more + manifests, garbage collection is effectively a noop. + `See here for more information + `_ + about how and why you should clean up your container registry periodically. - If the name is both formatted correctly and available, the response code will - be 204 and contain no body. If the name is already in use, the response will - be a 409 Conflict. + To request a garbage collection run on your registry, send a POST request to + ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the + following sequence of events on your registry. - :param body: Is either a JSON type or a IO[bytes] type. Required. + + * Set the registry to read-only mode, meaning no further write-scoped + JWTs will be issued to registry clients. Existing write-scoped JWTs will + continue to work until they expire which can take up to 15 minutes. + * Wait until all existing write-scoped JWTs have expired. + * Scan all registry manifests to determine which blobs are unreferenced. + * Delete all unreferenced blobs from the registry. + * Record the number of blobs deleted and bytes freed, mark the garbage + collection status as ``success``. + * Remove the read-only mode restriction from the registry, meaning write-scoped + JWTs will once again be issued to registry clients. + + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. :type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -170346,12 +180659,33 @@ async def validate_name( # JSON input template you can fill out and use as your body input. body = { - "name": "str" # A globally unique name for the container registry. 
Must be - lowercase and be composed only of numbers, letters and ``-``"" , up to a limit of - 63 characters. Required. + "type": "str" # Optional. Type of the garbage collection to run against this + registry. Known values are: "untagged manifests only", "unreferenced blobs only", + and "untagged manifests and unreferenced blobs". } - # response body for status code(s): 409 + # response body for status code(s): 201 + response == { + "garbage_collection": { + "blobs_deleted": 0, # Optional. The number of blobs deleted as a + result of this garbage collection. + "created_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was created. + "freed_bytes": 0, # Optional. The number of bytes freed as a result + of this garbage collection. + "registry_name": "str", # Optional. The name of the container + registry. + "status": "str", # Optional. The current status of this garbage + collection. Known values are: "requested", "waiting for write JWTs to + expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", + "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of the + garbage collection. + } + } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -170382,7 +180716,7 @@ async def validate_name( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -170390,9 +180724,13 @@ async def validate_name( if isinstance(body, (IOBase, bytes)): _content = body else: - _json = body + if body is not None: + _json = body + else: + _json = None - _request = build_registry_validate_name_request( + _request = build_registry_run_garbage_collection_request( + registry_name=registry_name, content_type=content_type, json=_json, content=_content, @@ -170410,15 +180748,14 @@ async def validate_name( response = pipeline_response.http_response - if response.status_code not in [204, 409]: + if response.status_code not in [201, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -170429,7 +180766,12 @@ async def validate_name( "int", response.headers.get("ratelimit-reset") ) - if response.status_code == 409: + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -170446,31 +180788,22 @@ async def validate_name( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # 
type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def list_repositories( - self, registry_name: str, *, per_page: int = 20, page: int = 1, **kwargs: Any - ) -> JSON: + async def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List All Container Registry Repositories. + """Get Active Garbage Collection. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - This endpoint has been deprecated in favor of the *List All Container Registry Repositories - [V2]* endpoint. - - To list all repositories in your container registry, send a GET - request to ``/v2/registry/$REGISTRY_NAME/repositories``. + To get information about the currently-active garbage collection + for a registry, send a GET request to ``/v2/registry/$REGISTRY_NAME/garbage-collection``. :param registry_name: The name of a container registry. Required. :type registry_name: str - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -170480,37 +180813,24 @@ async def list_repositories( # response body for status code(s): 200 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "links": { - "pages": {} - }, - "repositories": [ - { - "latest_tag": { - "compressed_size_bytes": 0, # Optional. The - compressed size of the tag in bytes. - "manifest_digest": "str", # Optional. The digest of - the manifest associated with the tag. - "registry_name": "str", # Optional. The name of the - container registry. - "repository": "str", # Optional. The name of the - repository. - "size_bytes": 0, # Optional. 
The uncompressed size - of the tag in bytes (this size is calculated asynchronously so it may - not be immediately available). - "tag": "str", # Optional. The name of the tag. - "updated_at": "2020-02-20 00:00:00" # Optional. The - time the tag was last updated. - }, - "name": "str", # Optional. The name of the repository. - "registry_name": "str", # Optional. The name of the - container registry. - "tag_count": 0 # Optional. The number of tags in the - repository. - } - ] + "garbage_collection": { + "blobs_deleted": 0, # Optional. The number of blobs deleted as a + result of this garbage collection. + "created_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was created. + "freed_bytes": 0, # Optional. The number of bytes freed as a result + of this garbage collection. + "registry_name": "str", # Optional. The name of the container + registry. + "status": "str", # Optional. The current status of this garbage + collection. Known values are: "requested", "waiting for write JWTs to + expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", + "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of the + garbage collection. 
+ } } # response body for status code(s): 404 response == { @@ -170542,10 +180862,8 @@ async def list_repositories( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_list_repositories_request( + _request = build_registry_get_garbage_collection_request( registry_name=registry_name, - per_page=per_page, - page=page, headers=_headers, params=_params, ) @@ -170605,33 +180923,23 @@ async def list_repositories( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def list_repositories_v2( - self, - registry_name: str, - *, - per_page: int = 20, - page: int = 1, - page_token: Optional[str] = None, - **kwargs: Any + async def list_garbage_collections( + self, registry_name: str, *, per_page: int = 20, page: int = 1, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """List All Container Registry Repositories (V2). + """List Garbage Collections. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To list all repositories in your container registry, send a GET - request to ``/v2/registry/$REGISTRY_NAME/repositoriesV2``. + To get information about past garbage collections for a registry, + send a GET request to ``/v2/registry/$REGISTRY_NAME/garbage-collections``. :param registry_name: The name of a container registry. Required. :type registry_name: str :keyword per_page: Number of items returned per page. Default value is 20. :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Ignored when 'page_token' is - provided. Default value is 1. + :keyword page: Which 'page' of paginated results to return. Default value is 1. :paramtype page: int - :keyword page_token: Token to retrieve of the next or previous set of results more quickly than - using 'page'. Default value is None. 
- :paramtype page_token: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -170641,51 +180949,28 @@ async def list_repositories_v2( # response body for status code(s): 200 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "links": { - "pages": {} - }, - "repositories": [ + "garbage_collections": [ { - "latest_manifest": { - "blobs": [ - { - "compressed_size_bytes": 0, # - Optional. The compressed size of the blob in bytes. - "digest": "str" # Optional. The - digest of the blob. - } - ], - "compressed_size_bytes": 0, # Optional. The - compressed size of the manifest in bytes. - "digest": "str", # Optional. The manifest digest. - "registry_name": "str", # Optional. The name of the - container registry. - "repository": "str", # Optional. The name of the - repository. - "size_bytes": 0, # Optional. The uncompressed size - of the manifest in bytes (this size is calculated asynchronously so - it may not be immediately available). - "tags": [ - "str" # Optional. All tags associated with - this manifest. - ], - "updated_at": "2020-02-20 00:00:00" # Optional. The - time the manifest was last updated. - }, - "manifest_count": 0, # Optional. The number of manifests in - the repository. - "name": "str", # Optional. The name of the repository. + "blobs_deleted": 0, # Optional. The number of blobs deleted + as a result of this garbage collection. + "created_at": "2020-02-20 00:00:00", # Optional. The time + the garbage collection was created. + "freed_bytes": 0, # Optional. The number of bytes freed as a + result of this garbage collection. "registry_name": "str", # Optional. The name of the container registry. - "tag_count": 0 # Optional. The number of tags in the - repository. + "status": "str", # Optional. The current status of this + garbage collection. 
Known values are: "requested", "waiting for write + JWTs to expire", "scanning manifests", "deleting unreferenced blobs", + "cancelling", "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. The time + the garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of + the garbage collection. } ] } - # response body for status code(s): 400, 404 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -170715,11 +181000,10 @@ async def list_repositories_v2( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_list_repositories_v2_request( + _request = build_registry_list_garbage_collections_request( registry_name=registry_name, per_page=per_page, page=page, - page_token=page_token, headers=_headers, params=_params, ) @@ -170734,7 +181018,7 @@ async def list_repositories_v2( response = pipeline_response.http_response - if response.status_code not in [200, 400, 404]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -170757,22 +181041,6 @@ async def list_repositories_v2( else: deserialized = None - if response.status_code == 400: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", 
response.headers.get("ratelimit-limit") @@ -170794,38 +181062,34 @@ async def list_repositories_v2( return cast(JSON, deserialized) # type: ignore - @distributed_trace_async - async def list_repository_tags( + @overload + async def update_garbage_collection( self, registry_name: str, - repository_name: str, + garbage_collection_uuid: str, + body: JSON, *, - per_page: int = 20, - page: int = 1, + content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """List All Container Registry Repository Tags. + """Update Garbage Collection. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To list all tags in your container registry repository, send a GET - request to ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/tags``. - - Note that if your repository name contains ``/`` characters, it must be - URL-encoded in the request URL. For example, to list tags for - ``registry.digitalocean.com/example/my/repo``\\ , the path would be - ``/v2/registry/example/repositories/my%2Frepo/tags``. + To cancel the currently-active garbage collection for a registry, + send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID`` + and specify one or more of the attributes below. :param registry_name: The name of a container registry. Required. :type registry_name: str - :param repository_name: The name of a container registry repository. If the name contains ``/`` - characters, they must be URL-encoded, e.g. ``%2F``. Required. - :type repository_name: str - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int + :param garbage_collection_uuid: The UUID of a garbage collection run. Required. + :type garbage_collection_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -170833,31 +181097,32 @@ async def list_repository_tags( Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "cancel": bool # Optional. A boolean value indicating that the garbage + collection should be cancelled. + } + # response body for status code(s): 200 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "links": { - "pages": {} - }, - "tags": [ - { - "compressed_size_bytes": 0, # Optional. The compressed size - of the tag in bytes. - "manifest_digest": "str", # Optional. The digest of the - manifest associated with the tag. - "registry_name": "str", # Optional. The name of the - container registry. - "repository": "str", # Optional. The name of the repository. - "size_bytes": 0, # Optional. The uncompressed size of the - tag in bytes (this size is calculated asynchronously so it may not be - immediately available). - "tag": "str", # Optional. The name of the tag. - "updated_at": "2020-02-20 00:00:00" # Optional. The time the - tag was last updated. - } - ] + "garbage_collection": { + "blobs_deleted": 0, # Optional. The number of blobs deleted as a + result of this garbage collection. + "created_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was created. + "freed_bytes": 0, # Optional. The number of bytes freed as a result + of this garbage collection. + "registry_name": "str", # Optional. The name of the container + registry. + "status": "str", # Optional. The current status of this garbage + collection. Known values are: "requested", "waiting for write JWTs to + expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", + "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. 
The time the + garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of the + garbage collection. + } } # response body for status code(s): 404 response == { @@ -170871,125 +181136,133 @@ async def list_repository_tags( tickets to help identify the issue. } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_registry_list_repository_tags_request( - registry_name=registry_name, - repository_name=repository_name, - per_page=per_page, - page=page, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - if _stream: - await response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + @overload + async def 
update_garbage_collection( + self, + registry_name: str, + garbage_collection_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update Garbage Collection. - if response.content: - deserialized = response.json() - else: - deserialized = None + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + To cancel the currently-active garbage collection for a registry, + send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID`` + and specify one or more of the attributes below. - if response.content: - deserialized = response.json() - else: - deserialized = None + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param garbage_collection_uuid: The UUID of a garbage collection run. Required. + :type garbage_collection_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + Example: + .. code-block:: python - return cast(JSON, deserialized) # type: ignore + # response body for status code(s): 200 + response == { + "garbage_collection": { + "blobs_deleted": 0, # Optional. The number of blobs deleted as a + result of this garbage collection. 
+ "created_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was created. + "freed_bytes": 0, # Optional. The number of bytes freed as a result + of this garbage collection. + "registry_name": "str", # Optional. The name of the container + registry. + "status": "str", # Optional. The current status of this garbage + collection. Known values are: "requested", "waiting for write JWTs to + expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", + "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of the + garbage collection. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ @distributed_trace_async - async def delete_repository_tag( + async def update_garbage_collection( self, registry_name: str, - repository_name: str, - repository_tag: str, + garbage_collection_uuid: str, + body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Delete Container Registry Repository Tag. + """Update Garbage Collection. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To delete a container repository tag, send a DELETE request to - ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/tags/$TAG``. 
- - Note that if your repository name contains ``/`` characters, it must be - URL-encoded in the request URL. For example, to delete - ``registry.digitalocean.com/example/my/repo:mytag``\\ , the path would be - ``/v2/registry/example/repositories/my%2Frepo/tags/mytag``. - - A successful request will receive a 204 status code with no body in response. - This indicates that the request was processed successfully. + To cancel the currently-active garbage collection for a registry, + send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID`` + and specify one or more of the attributes below. :param registry_name: The name of a container registry. Required. :type registry_name: str - :param repository_name: The name of a container registry repository. If the name contains ``/`` - characters, they must be URL-encoded, e.g. ``%2F``. Required. - :type repository_name: str - :param repository_tag: The name of a container registry repository tag. Required. - :type repository_tag: str - :return: JSON object or None - :rtype: JSON or None + :param garbage_collection_uuid: The UUID of a garbage collection run. Required. + :type garbage_collection_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "cancel": bool # Optional. A boolean value indicating that the garbage + collection should be cancelled. + } + + # response body for status code(s): 200 + response == { + "garbage_collection": { + "blobs_deleted": 0, # Optional. The number of blobs deleted as a + result of this garbage collection. + "created_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was created. + "freed_bytes": 0, # Optional. The number of bytes freed as a result + of this garbage collection. 
+ "registry_name": "str", # Optional. The name of the container + registry. + "status": "str", # Optional. The current status of this garbage + collection. Known values are: "requested", "waiting for write JWTs to + expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", + "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of the + garbage collection. + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -171015,15 +181288,28 @@ async def delete_repository_tag( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_delete_repository_tag_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_registry_update_garbage_collection_request( registry_name=registry_name, - repository_name=repository_name, - repository_tag=repository_tag, + garbage_collection_uuid=garbage_collection_uuid, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -171038,15 +181324,14 @@ async def delete_repository_tag( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, 
response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -171057,6 +181342,11 @@ async def delete_repository_tag( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -171074,42 +181364,32 @@ async def delete_repository_tag( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def list_repository_manifests( - self, - registry_name: str, - repository_name: str, - *, - per_page: int = 20, - page: int = 1, - **kwargs: Any - ) -> JSON: + async def get_options(self, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List All Container Registry Repository Manifests. + """List Registry Options (Subscription Tiers and Available Regions). - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + **Note: This endpoint is deprecated and may be removed in a future version. There is no + alternative.****\\ Note: This endpoint is deprecated. Please use the ``/v2/registries`` + endpoint instead.** - To list all manifests in your container registry repository, send a GET - request to ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/digests``. + This endpoint serves to provide additional information as to which option values + are available when creating a container registry. 
- Note that if your repository name contains ``/`` characters, it must be - URL-encoded in the request URL. For example, to list manifests for - ``registry.digitalocean.com/example/my/repo``\\ , the path would be - ``/v2/registry/example/repositories/my%2Frepo/digests``. + There are multiple subscription tiers available for container registry. Each + tier allows a different number of image repositories to be created in your + registry, and has a different amount of storage and transfer included. + + There are multiple regions available for container registry and controls + where your data is stored. + + To list the available options, send a GET request to + ``/v2/registry/options``. - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param repository_name: The name of a container registry repository. If the name contains ``/`` - characters, they must be URL-encoded, e.g. ``%2F``. Required. - :type repository_name: str - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -171119,50 +181399,43 @@ async def list_repository_manifests( # response body for status code(s): 200 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "links": { - "pages": {} - }, - "manifests": [ - { - "blobs": [ - { - "compressed_size_bytes": 0, # Optional. The - compressed size of the blob in bytes. - "digest": "str" # Optional. The digest of - the blob. - } - ], - "compressed_size_bytes": 0, # Optional. The compressed size - of the manifest in bytes. - "digest": "str", # Optional. The manifest digest. - "registry_name": "str", # Optional. The name of the - container registry. - "repository": "str", # Optional. The name of the repository. 
- "size_bytes": 0, # Optional. The uncompressed size of the - manifest in bytes (this size is calculated asynchronously so it may not - be immediately available). - "tags": [ - "str" # Optional. All tags associated with this - manifest. - ], - "updated_at": "2020-02-20 00:00:00" # Optional. The time the - manifest was last updated. - } - ] - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. + "options": { + "available_regions": [ + "str" # Optional. + ], + "subscription_tiers": [ + { + "allow_storage_overage": bool, # Optional. A boolean + indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB + used. + "eligibility_reasons": [ + "str" # Optional. If your account is not + eligible to use a certain subscription tier, this will include a + list of reasons that prevent you from using the tier. + ], + "eligible": bool, # Optional. A boolean indicating + whether your account it eligible to use a certain subscription tier. + "included_bandwidth_bytes": 0, # Optional. The + amount of outbound data transfer included in the subscription tier in + bytes. + "included_repositories": 0, # Optional. The number + of repositories included in the subscription tier. ``0`` indicates + that the subscription tier includes unlimited repositories. + "included_storage_bytes": 0, # Optional. The amount + of storage included in the subscription tier in bytes. 
+ "monthly_price_in_cents": 0, # Optional. The monthly + cost of the subscription tier in cents. + "name": "str", # Optional. The name of the + subscription tier. + "slug": "str", # Optional. The slug identifier of + the subscription tier. + "storage_overage_price_in_cents": 0 # Optional. The + price paid in cents per GiB for additional storage beyond what is + included in the subscription plan. + } + ] + } } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -171183,11 +181456,7 @@ async def list_repository_manifests( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_list_repository_manifests_request( - registry_name=registry_name, - repository_name=repository_name, - per_page=per_page, - page=page, + _request = build_registry_get_options_request( headers=_headers, params=_params, ) @@ -171202,98 +181471,112 @@ async def list_repository_manifests( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [200]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", 
response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore return cast(JSON, deserialized) # type: ignore - @distributed_trace_async - async def delete_repository_manifest( - self, - registry_name: str, - repository_name: str, - manifest_digest: str, - **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Delete Container Registry Repository Manifest. - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** +class ReservedIPsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. - To delete a container repository manifest by digest, send a DELETE request to - ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/digests/$MANIFEST_DIGEST``. + Instead, you should access the following operations through + :class:`~pydo.aio.GeneratedClient`'s + :attr:`reserved_ips` attribute. + """ - Note that if your repository name contains ``/`` characters, it must be - URL-encoded in the request URL. For example, to delete - ``registry.digitalocean.com/example/my/repo@sha256:abcd``\\ , the path would be - ``/v2/registry/example/repositories/my%2Frepo/digests/sha256:abcd``. 
+ def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) - A successful request will receive a 204 status code with no body in response. - This indicates that the request was processed successfully. + @distributed_trace_async + async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """List All Reserved IPs. - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param repository_name: The name of a container registry repository. If the name contains ``/`` - characters, they must be URL-encoded, e.g. ``%2F``. Required. - :type repository_name: str - :param manifest_digest: The manifest digest of a container registry repository tag. Required. - :type manifest_digest: str - :return: JSON object or None - :rtype: JSON or None + To list all of the reserved IPs available on your account, send a GET request to + ``/v2/reserved_ips``. + + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 404 + # response body for status code(s): 200 response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. 
- "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "links": { + "pages": {} + }, + "reserved_ips": [ + { + "droplet": {}, + "ip": "str", # Optional. The public IP address of the + reserved IP. It also serves as its identifier. + "locked": bool, # Optional. A boolean value indicating + whether or not the reserved IP has pending actions preventing new ones + from being submitted. + "project_id": "str", # Optional. The UUID of the project to + which the reserved IP currently belongs.:code:`
`:code:`
`Requires + ``project:read`` scope. + "region": { + "available": bool, # This is a boolean value that + represents whether new Droplets can be created in this region. + Required. + "features": [ + "str" # This attribute is set to an array + which contains features available in this region. Required. + ], + "name": "str", # The display name of the region. + This will be a full name that is used in the control panel and other + interfaces. Required. + "sizes": [ + "str" # This attribute is set to an array + which contains the identifying slugs for the sizes available in + this region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used + as a unique identifier for each region. Required. + } + } + ] } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -171312,12 +181595,11 @@ async def delete_repository_manifest( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_delete_repository_manifest_request( - registry_name=registry_name, - repository_name=repository_name, - manifest_digest=manifest_digest, + _request = build_reserved_ips_list_request( + per_page=per_page, + page=page, headers=_headers, params=_params, ) @@ -171332,87 +181614,52 @@ async def delete_repository_manifest( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - 
response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @overload - async def run_garbage_collection( - self, - registry_name: str, - body: Optional[JSON] = None, - *, - content_type: str = "application/json", - **kwargs: Any + async def create( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Start Garbage Collection. - - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + """Create a New Reserved IP. 
- Garbage collection enables users to clear out unreferenced blobs (layer & - manifest data) after deleting one or more manifests from a repository. If - there are no unreferenced blobs resulting from the deletion of one or more - manifests, garbage collection is effectively a noop. - `See here for more information - `_ - about how and why you should clean up your container registry periodically. + On creation, a reserved IP must be either assigned to a Droplet or reserved to a region. - To request a garbage collection run on your registry, send a POST request to - ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the - following sequence of events on your registry. + * + To create a new reserved IP assigned to a Droplet, send a POST + request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute. - * Set the registry to read-only mode, meaning no further write-scoped - JWTs will be issued to registry clients. Existing write-scoped JWTs will - continue to work until they expire which can take up to 15 minutes. - * Wait until all existing write-scoped JWTs have expired. - * Scan all registry manifests to determine which blobs are unreferenced. - * Delete all unreferenced blobs from the registry. - * Record the number of blobs deleted and bytes freed, mark the garbage - collection status as ``success``. - * Remove the read-only mode restriction from the registry, meaning write-scoped - JWTs will once again be issued to registry clients. + * + To create a new reserved IP reserved to a region, send a POST request to + ``/v2/reserved_ips`` with the ``region`` attribute. - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param body: Default value is None. + :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -171425,87 +181672,83 @@ async def run_garbage_collection( .. 
code-block:: python # JSON input template you can fill out and use as your body input. - body = { - "type": "str" # Optional. Type of the garbage collection to run against this - registry. Known values are: "untagged manifests only", "unreferenced blobs only", - and "untagged manifests and unreferenced blobs". - } + body = {} - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "garbage_collection": { - "blobs_deleted": 0, # Optional. The number of blobs deleted as a - result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was created. - "freed_bytes": 0, # Optional. The number of bytes freed as a result - of this garbage collection. - "registry_name": "str", # Optional. The name of the container - registry. - "status": "str", # Optional. The current status of this garbage - collection. Known values are: "requested", "waiting for write JWTs to - expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", - "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of the - garbage collection. + "links": { + "actions": [ + { + "href": "str", # Optional. A URL that can be used to + access the action. + "id": 0, # Optional. A unique numeric ID that can be + used to identify and reference an action. + "rel": "str" # Optional. A string specifying the + type of the related action. + } + ], + "droplets": [ + { + "href": "str", # Optional. A URL that can be used to + access the action. + "id": 0, # Optional. A unique numeric ID that can be + used to identify and reference an action. + "rel": "str" # Optional. A string specifying the + type of the related action. + } + ] + }, + "reserved_ip": { + "droplet": {}, + "ip": "str", # Optional. The public IP address of the reserved IP. 
+ It also serves as its identifier. + "locked": bool, # Optional. A boolean value indicating whether or + not the reserved IP has pending actions preventing new ones from being + submitted. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs.:code:`
`:code:`
`Requires + ``project:read`` scope. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + } } } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } """ @overload - async def run_garbage_collection( - self, - registry_name: str, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", - **kwargs: Any + async def create( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Start Garbage Collection. - - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + """Create a New Reserved IP. - Garbage collection enables users to clear out unreferenced blobs (layer & - manifest data) after deleting one or more manifests from a repository. 
If - there are no unreferenced blobs resulting from the deletion of one or more - manifests, garbage collection is effectively a noop. - `See here for more information - `_ - about how and why you should clean up your container registry periodically. + On creation, a reserved IP must be either assigned to a Droplet or reserved to a region. - To request a garbage collection run on your registry, send a POST request to - ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the - following sequence of events on your registry. + * + To create a new reserved IP assigned to a Droplet, send a POST + request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute. - * Set the registry to read-only mode, meaning no further write-scoped - JWTs will be issued to registry clients. Existing write-scoped JWTs will - continue to work until they expire which can take up to 15 minutes. - * Wait until all existing write-scoped JWTs have expired. - * Scan all registry manifests to determine which blobs are unreferenced. - * Delete all unreferenced blobs from the registry. - * Record the number of blobs deleted and bytes freed, mark the garbage - collection status as ``success``. - * Remove the read-only mode restriction from the registry, meaning write-scoped - JWTs will once again be issued to registry clients. + * + To create a new reserved IP reserved to a region, send a POST request to + ``/v2/reserved_ips`` with the ``region`` attribute. - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param body: Default value is None. + :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". @@ -171517,79 +181760,79 @@ async def run_garbage_collection( Example: .. 
code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "garbage_collection": { - "blobs_deleted": 0, # Optional. The number of blobs deleted as a - result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was created. - "freed_bytes": 0, # Optional. The number of bytes freed as a result - of this garbage collection. - "registry_name": "str", # Optional. The name of the container - registry. - "status": "str", # Optional. The current status of this garbage - collection. Known values are: "requested", "waiting for write JWTs to - expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", - "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of the - garbage collection. + "links": { + "actions": [ + { + "href": "str", # Optional. A URL that can be used to + access the action. + "id": 0, # Optional. A unique numeric ID that can be + used to identify and reference an action. + "rel": "str" # Optional. A string specifying the + type of the related action. + } + ], + "droplets": [ + { + "href": "str", # Optional. A URL that can be used to + access the action. + "id": 0, # Optional. A unique numeric ID that can be + used to identify and reference an action. + "rel": "str" # Optional. A string specifying the + type of the related action. + } + ] + }, + "reserved_ip": { + "droplet": {}, + "ip": "str", # Optional. The public IP address of the reserved IP. + It also serves as its identifier. + "locked": bool, # Optional. A boolean value indicating whether or + not the reserved IP has pending actions preventing new ones from being + submitted. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs.:code:`
`:code:`
`Requires + ``project:read`` scope. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + } } } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } """ @distributed_trace_async - async def run_garbage_collection( - self, - registry_name: str, - body: Optional[Union[JSON, IO[bytes]]] = None, - **kwargs: Any - ) -> JSON: + async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Start Garbage Collection. - - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + """Create a New Reserved IP. - Garbage collection enables users to clear out unreferenced blobs (layer & - manifest data) after deleting one or more manifests from a repository. 
If - there are no unreferenced blobs resulting from the deletion of one or more - manifests, garbage collection is effectively a noop. - `See here for more information - `_ - about how and why you should clean up your container registry periodically. + On creation, a reserved IP must be either assigned to a Droplet or reserved to a region. - To request a garbage collection run on your registry, send a POST request to - ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the - following sequence of events on your registry. + * + To create a new reserved IP assigned to a Droplet, send a POST + request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute. - * Set the registry to read-only mode, meaning no further write-scoped - JWTs will be issued to registry clients. Existing write-scoped JWTs will - continue to work until they expire which can take up to 15 minutes. - * Wait until all existing write-scoped JWTs have expired. - * Scan all registry manifests to determine which blobs are unreferenced. - * Delete all unreferenced blobs from the registry. - * Record the number of blobs deleted and bytes freed, mark the garbage - collection status as ``success``. - * Remove the read-only mode restriction from the registry, meaning write-scoped - JWTs will once again be issued to registry clients. + * + To create a new reserved IP reserved to a region, send a POST request to + ``/v2/reserved_ips`` with the ``region`` attribute. - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON @@ -171599,44 +181842,62 @@ async def run_garbage_collection( .. code-block:: python # JSON input template you can fill out and use as your body input. - body = { - "type": "str" # Optional. 
Type of the garbage collection to run against this - registry. Known values are: "untagged manifests only", "unreferenced blobs only", - and "untagged manifests and unreferenced blobs". - } + body = {} - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "garbage_collection": { - "blobs_deleted": 0, # Optional. The number of blobs deleted as a - result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was created. - "freed_bytes": 0, # Optional. The number of bytes freed as a result - of this garbage collection. - "registry_name": "str", # Optional. The name of the container - registry. - "status": "str", # Optional. The current status of this garbage - collection. Known values are: "requested", "waiting for write JWTs to - expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", - "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of the - garbage collection. + "links": { + "actions": [ + { + "href": "str", # Optional. A URL that can be used to + access the action. + "id": 0, # Optional. A unique numeric ID that can be + used to identify and reference an action. + "rel": "str" # Optional. A string specifying the + type of the related action. + } + ], + "droplets": [ + { + "href": "str", # Optional. A URL that can be used to + access the action. + "id": 0, # Optional. A unique numeric ID that can be + used to identify and reference an action. + "rel": "str" # Optional. A string specifying the + type of the related action. + } + ] + }, + "reserved_ip": { + "droplet": {}, + "ip": "str", # Optional. The public IP address of the reserved IP. + It also serves as its identifier. + "locked": bool, # Optional. 
A boolean value indicating whether or + not the reserved IP has pending actions preventing new ones from being + submitted. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs.:code:`
`:code:`
`Requires + ``project:read`` scope. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + } } } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { 404: ResourceNotFoundError, @@ -171665,13 +181926,9 @@ async def run_garbage_collection( if isinstance(body, (IOBase, bytes)): _content = body else: - if body is not None: - _json = body - else: - _json = None + _json = body - _request = build_registry_run_garbage_collection_request( - registry_name=registry_name, + _request = build_reserved_ips_create_request( content_type=content_type, json=_json, content=_content, @@ -171689,14 +181946,136 @@ async def run_garbage_collection( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [202]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 201: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def get(self, reserved_ip: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Retrieve an Existing Reserved IP. + + To show information about a reserved IP, send a GET request to + ``/v2/reserved_ips/$RESERVED_IP_ADDR``. + + :param reserved_ip: A reserved IP address. Required. 
+ :type reserved_ip: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "reserved_ip": { + "droplet": {}, + "ip": "str", # Optional. The public IP address of the reserved IP. + It also serves as its identifier. + "locked": bool, # Optional. A boolean value indicating whether or + not the reserved IP has pending actions preventing new ones from being + submitted. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs.:code:`
`:code:`
`Requires + ``project:read`` scope. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + } + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_reserved_ips_get_request( + reserved_ip=reserved_ip, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -171734,45 +182113,25 @@ async def run_garbage_collection( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSON: + async def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]: # pylint: disable=line-too-long - """Get Active Garbage Collection. + """Delete a Reserved IP. - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + To delete a reserved IP and remove it from your account, send a DELETE request + to ``/v2/reserved_ips/$RESERVED_IP_ADDR``. 
- To get information about the currently-active garbage collection - for a registry, send a GET request to ``/v2/registry/$REGISTRY_NAME/garbage-collection``. + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :return: JSON object - :rtype: JSON + :param reserved_ip: A reserved IP address. Required. + :type reserved_ip: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "garbage_collection": { - "blobs_deleted": 0, # Optional. The number of blobs deleted as a - result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was created. - "freed_bytes": 0, # Optional. The number of bytes freed as a result - of this garbage collection. - "registry_name": "str", # Optional. The name of the container - registry. - "status": "str", # Optional. The current status of this garbage - collection. Known values are: "requested", "waiting for write JWTs to - expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", - "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of the - garbage collection. 
- } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -171801,10 +182160,10 @@ async def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSO _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_registry_get_garbage_collection_request( - registry_name=registry_name, + _request = build_reserved_ips_delete_request( + reserved_ip=reserved_ip, headers=_headers, params=_params, ) @@ -171819,14 +182178,15 @@ async def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSO response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -171837,11 +182197,6 @@ async def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSO "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -171859,28 +182214,40 @@ async def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSO deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: 
ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore + + +class ReservedIPsActionsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~pydo.aio.GeneratedClient`'s + :attr:`reserved_ips_actions` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) @distributed_trace_async - async def list_garbage_collections( - self, registry_name: str, *, per_page: int = 20, page: int = 1, **kwargs: Any - ) -> JSON: + async def list(self, reserved_ip: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List Garbage Collections. - - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + """List All Actions for a Reserved IP. - To get information about past garbage collections for a registry, - send a GET request to ``/v2/registry/$REGISTRY_NAME/garbage-collections``. + To retrieve all actions that have been executed on a reserved IP, send a GET request to + ``/v2/reserved_ips/$RESERVED_IP/actions``. - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int + :param reserved_ip: A reserved IP address. Required. 
+ :type reserved_ip: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -171890,26 +182257,56 @@ async def list_garbage_collections( # response body for status code(s): 200 response == { - "garbage_collections": [ + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "actions": [ { - "blobs_deleted": 0, # Optional. The number of blobs deleted - as a result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time - the garbage collection was created. - "freed_bytes": 0, # Optional. The number of bytes freed as a - result of this garbage collection. - "registry_name": "str", # Optional. The name of the - container registry. - "status": "str", # Optional. The current status of this - garbage collection. Known values are: "requested", "waiting for write - JWTs to expire", "scanning manifests", "deleting unreferenced blobs", - "cancelling", "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time - the garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of - the garbage collection. + "completed_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the action was completed. + "id": 0, # Optional. A unique numeric ID that can be used to + identify and reference an action. + "region": { + "available": bool, # This is a boolean value that + represents whether new Droplets can be created in this region. + Required. + "features": [ + "str" # This attribute is set to an array + which contains features available in this region. Required. + ], + "name": "str", # The display name of the region. + This will be a full name that is used in the control panel and other + interfaces. Required. + "sizes": [ + "str" # This attribute is set to an array + which contains the identifying slugs for the sizes available in + this region. 
sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used + as a unique identifier for each region. Required. + }, + "region_slug": "str", # Optional. A human-readable string + that is used as a unique identifier for each region. + "resource_id": 0, # Optional. A unique identifier for the + resource that the action is associated with. + "resource_type": "str", # Optional. The type of resource + that the action is associated with. + "started_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the action was initiated. + "status": "in-progress", # Optional. Default value is + "in-progress". The current status of the action. This can be + "in-progress", "completed", or "errored". Known values are: + "in-progress", "completed", and "errored". + "type": "str" # Optional. This is the type of action that + the object represents. For example, this could be "transfer" to represent + the state of an image transfer action. } - ] + ], + "links": { + "pages": {} + } } # response body for status code(s): 404 response == { @@ -171941,10 +182338,8 @@ async def list_garbage_collections( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_list_garbage_collections_request( - registry_name=registry_name, - per_page=per_page, - page=page, + _request = build_reserved_ips_actions_list_request( + reserved_ip=reserved_ip, headers=_headers, params=_params, ) @@ -172004,29 +182399,35 @@ async def list_garbage_collections( return cast(JSON, deserialized) # type: ignore @overload - async def update_garbage_collection( + async def post( self, - registry_name: str, - garbage_collection_uuid: str, - body: JSON, + reserved_ip: str, + body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update Garbage Collection. + """Initiate a Reserved IP Action. 
- **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + To initiate an action on a reserved IP send a POST request to + ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request, + set the ``type`` attribute to on of the supported action types: - To cancel the currently-active garbage collection for a registry, - send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID`` - and specify one or more of the attributes below. + .. list-table:: + :header-rows: 1 - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param garbage_collection_uuid: The UUID of a garbage collection run. Required. - :type garbage_collection_uuid: str - :param body: Required. + * - Action + - Details + * - ``assign`` + - Assigns a reserved IP to a Droplet + * - ``unassign`` + - Unassign a reserved IP from a Droplet. + + :param reserved_ip: A reserved IP address. Required. + :type reserved_ip: str + :param body: The ``type`` attribute set in the request body will specify the action that + will be taken on the reserved IP. Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -172039,30 +182440,51 @@ async def update_garbage_collection( .. code-block:: python # JSON input template you can fill out and use as your body input. - body = { - "cancel": bool # Optional. A boolean value indicating that the garbage - collection should be cancelled. - } + body = {} - # response body for status code(s): 200 + # response body for status code(s): 201 response == { - "garbage_collection": { - "blobs_deleted": 0, # Optional. The number of blobs deleted as a - result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was created. - "freed_bytes": 0, # Optional. 
The number of bytes freed as a result - of this garbage collection. - "registry_name": "str", # Optional. The name of the container - registry. - "status": "str", # Optional. The current status of this garbage - collection. Known values are: "requested", "waiting for write JWTs to - expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", - "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of the - garbage collection. + "action": { + "completed_at": "2020-02-20 00:00:00", # Optional. A time value + given in ISO8601 combined date and time format that represents when the + action was completed. + "id": 0, # Optional. A unique numeric ID that can be used to + identify and reference an action. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + }, + "region_slug": "str", # Optional. A human-readable string that is + used as a unique identifier for each region. + "resource_id": 0, # Optional. A unique identifier for the resource + that the action is associated with. + "resource_type": "str", # Optional. 
The type of resource that the + action is associated with. + "started_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the action was + initiated. + "status": "in-progress", # Optional. Default value is "in-progress". + The current status of the action. This can be "in-progress", "completed", or + "errored". Known values are: "in-progress", "completed", and "errored". + "type": "str" # Optional. This is the type of action that the object + represents. For example, this could be "transfer" to represent the state of + an image transfer action. } } # response body for status code(s): 404 @@ -172079,29 +182501,35 @@ async def update_garbage_collection( """ @overload - async def update_garbage_collection( + async def post( self, - registry_name: str, - garbage_collection_uuid: str, - body: IO[bytes], + reserved_ip: str, + body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update Garbage Collection. + """Initiate a Reserved IP Action. - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + To initiate an action on a reserved IP send a POST request to + ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request, + set the ``type`` attribute to on of the supported action types: - To cancel the currently-active garbage collection for a registry, - send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID`` - and specify one or more of the attributes below. + .. list-table:: + :header-rows: 1 - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param garbage_collection_uuid: The UUID of a garbage collection run. Required. - :type garbage_collection_uuid: str - :param body: Required. 
+ * - Action + - Details + * - ``assign`` + - Assigns a reserved IP to a Droplet + * - ``unassign`` + - Unassign a reserved IP from a Droplet. + + :param reserved_ip: A reserved IP address. Required. + :type reserved_ip: str + :param body: The ``type`` attribute set in the request body will specify the action that + will be taken on the reserved IP. Default value is None. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". @@ -172113,25 +182541,49 @@ async def update_garbage_collection( Example: .. code-block:: python - # response body for status code(s): 200 + # response body for status code(s): 201 response == { - "garbage_collection": { - "blobs_deleted": 0, # Optional. The number of blobs deleted as a - result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was created. - "freed_bytes": 0, # Optional. The number of bytes freed as a result - of this garbage collection. - "registry_name": "str", # Optional. The name of the container - registry. - "status": "str", # Optional. The current status of this garbage - collection. Known values are: "requested", "waiting for write JWTs to - expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", - "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of the - garbage collection. + "action": { + "completed_at": "2020-02-20 00:00:00", # Optional. A time value + given in ISO8601 combined date and time format that represents when the + action was completed. + "id": 0, # Optional. A unique numeric ID that can be used to + identify and reference an action. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs. 
+ "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + }, + "region_slug": "str", # Optional. A human-readable string that is + used as a unique identifier for each region. + "resource_id": 0, # Optional. A unique identifier for the resource + that the action is associated with. + "resource_type": "str", # Optional. The type of resource that the + action is associated with. + "started_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the action was + initiated. + "status": "in-progress", # Optional. Default value is "in-progress". + The current status of the action. This can be "in-progress", "completed", or + "errored". Known values are: "in-progress", "completed", and "errored". + "type": "str" # Optional. This is the type of action that the object + represents. For example, this could be "transfer" to represent the state of + an image transfer action. 
} } # response body for status code(s): 404 @@ -172148,27 +182600,34 @@ async def update_garbage_collection( """ @distributed_trace_async - async def update_garbage_collection( + async def post( self, - registry_name: str, - garbage_collection_uuid: str, - body: Union[JSON, IO[bytes]], + reserved_ip: str, + body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update Garbage Collection. + """Initiate a Reserved IP Action. - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + To initiate an action on a reserved IP send a POST request to + ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request, + set the ``type`` attribute to on of the supported action types: - To cancel the currently-active garbage collection for a registry, - send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID`` - and specify one or more of the attributes below. + .. list-table:: + :header-rows: 1 - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param garbage_collection_uuid: The UUID of a garbage collection run. Required. - :type garbage_collection_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. + * - Action + - Details + * - ``assign`` + - Assigns a reserved IP to a Droplet + * - ``unassign`` + - Unassign a reserved IP from a Droplet. + + :param reserved_ip: A reserved IP address. Required. + :type reserved_ip: str + :param body: The ``type`` attribute set in the request body will specify the action that + will be taken on the reserved IP. Is either a JSON type or a IO[bytes] type. Default value is + None. :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON @@ -172178,30 +182637,51 @@ async def update_garbage_collection( .. code-block:: python # JSON input template you can fill out and use as your body input. - body = { - "cancel": bool # Optional. 
A boolean value indicating that the garbage - collection should be cancelled. - } + body = {} - # response body for status code(s): 200 + # response body for status code(s): 201 response == { - "garbage_collection": { - "blobs_deleted": 0, # Optional. The number of blobs deleted as a - result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was created. - "freed_bytes": 0, # Optional. The number of bytes freed as a result - of this garbage collection. - "registry_name": "str", # Optional. The name of the container - registry. - "status": "str", # Optional. The current status of this garbage - collection. Known values are: "requested", "waiting for write JWTs to - expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", - "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of the - garbage collection. + "action": { + "completed_at": "2020-02-20 00:00:00", # Optional. A time value + given in ISO8601 combined date and time format that represents when the + action was completed. + "id": 0, # Optional. A unique numeric ID that can be used to + identify and reference an action. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. 
sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + }, + "region_slug": "str", # Optional. A human-readable string that is + used as a unique identifier for each region. + "resource_id": 0, # Optional. A unique identifier for the resource + that the action is associated with. + "resource_type": "str", # Optional. The type of resource that the + action is associated with. + "started_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the action was + initiated. + "status": "in-progress", # Optional. Default value is "in-progress". + The current status of the action. This can be "in-progress", "completed", or + "errored". Known values are: "in-progress", "completed", and "errored". + "type": "str" # Optional. This is the type of action that the object + represents. For example, this could be "transfer" to represent the state of + an image transfer action. 
} } # response body for status code(s): 404 @@ -172243,11 +182723,13 @@ async def update_garbage_collection( if isinstance(body, (IOBase, bytes)): _content = body else: - _json = body + if body is not None: + _json = body + else: + _json = None - _request = build_registry_update_garbage_collection_request( - registry_name=registry_name, - garbage_collection_uuid=garbage_collection_uuid, + _request = build_reserved_ips_actions_post_request( + reserved_ip=reserved_ip, content_type=content_type, json=_json, content=_content, @@ -172265,14 +182747,14 @@ async def update_garbage_collection( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [201, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -172310,27 +182792,18 @@ async def update_garbage_collection( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get_options(self, **kwargs: Any) -> JSON: + async def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List Registry Options (Subscription Tiers and Available Regions). - - **Note: This endpoint is deprecated and may be removed in a future version. There is no - alternative.****\\ Note: This endpoint is deprecated. Please use the ``/v2/registries`` - endpoint instead.** - - This endpoint serves to provide additional information as to which option values - are available when creating a container registry. - - There are multiple subscription tiers available for container registry. 
Each - tier allows a different number of image repositories to be created in your - registry, and has a different amount of storage and transfer included. - - There are multiple regions available for container registry and controls - where your data is stored. + """Retrieve an Existing Reserved IP Action. - To list the available options, send a GET request to - ``/v2/registry/options``. + To retrieve the status of a reserved IP action, send a GET request to + ``/v2/reserved_ips/$RESERVED_IP/actions/$ACTION_ID``. + :param reserved_ip: A reserved IP address. Required. + :type reserved_ip: str + :param action_id: A unique numeric ID that can be used to identify and reference an action. + Required. + :type action_id: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -172340,44 +182813,60 @@ async def get_options(self, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "options": { - "available_regions": [ - "str" # Optional. - ], - "subscription_tiers": [ - { - "allow_storage_overage": bool, # Optional. A boolean - indicating whether the subscription tier supports additional storage - above what is included in the base plan at an additional cost per GiB - used. - "eligibility_reasons": [ - "str" # Optional. If your account is not - eligible to use a certain subscription tier, this will include a - list of reasons that prevent you from using the tier. - ], - "eligible": bool, # Optional. A boolean indicating - whether your account it eligible to use a certain subscription tier. - "included_bandwidth_bytes": 0, # Optional. The - amount of outbound data transfer included in the subscription tier in - bytes. - "included_repositories": 0, # Optional. The number - of repositories included in the subscription tier. ``0`` indicates - that the subscription tier includes unlimited repositories. - "included_storage_bytes": 0, # Optional. The amount - of storage included in the subscription tier in bytes. 
- "monthly_price_in_cents": 0, # Optional. The monthly - cost of the subscription tier in cents. - "name": "str", # Optional. The name of the - subscription tier. - "slug": "str", # Optional. The slug identifier of - the subscription tier. - "storage_overage_price_in_cents": 0 # Optional. The - price paid in cents per GiB for additional storage beyond what is - included in the subscription plan. - } - ] + "action": { + "completed_at": "2020-02-20 00:00:00", # Optional. A time value + given in ISO8601 combined date and time format that represents when the + action was completed. + "id": 0, # Optional. A unique numeric ID that can be used to + identify and reference an action. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + }, + "region_slug": "str", # Optional. A human-readable string that is + used as a unique identifier for each region. + "resource_id": 0, # Optional. A unique identifier for the resource + that the action is associated with. + "resource_type": "str", # Optional. The type of resource that the + action is associated with. + "started_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the action was + initiated. 
+ "status": "in-progress", # Optional. Default value is "in-progress". + The current status of the action. This can be "in-progress", "completed", or + "errored". Known values are: "in-progress", "completed", and "errored". + "type": "str" # Optional. This is the type of action that the object + represents. For example, this could be "transfer" to represent the state of + an image transfer action. } } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { 404: ResourceNotFoundError, @@ -172397,7 +182886,9 @@ async def get_options(self, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_get_options_request( + _request = build_reserved_ips_actions_get_request( + reserved_ip=reserved_ip, + action_id=action_id, headers=_headers, params=_params, ) @@ -172412,27 +182903,44 @@ async def get_options(self, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = 
self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore @@ -172440,14 +182948,14 @@ async def get_options(self, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore -class ReservedIPsOperations: +class ReservedIPv6Operations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~pydo.aio.GeneratedClient`'s - :attr:`reserved_ips` attribute. + :attr:`reserved_ipv6` attribute. 
""" def __init__(self, *args, **kwargs) -> None: @@ -172462,10 +182970,10 @@ def __init__(self, *args, **kwargs) -> None: @distributed_trace_async async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List All Reserved IPs. + """List All Reserved IPv6s. - To list all of the reserved IPs available on your account, send a GET request to - ``/v2/reserved_ips``. + To list all of the reserved IPv6s available on your account, send a GET request to + ``/v2/reserved_ipv6``. :keyword per_page: Number of items returned per page. Default value is 20. :paramtype per_page: int @@ -172486,36 +182994,15 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO "links": { "pages": {} }, - "reserved_ips": [ + "reserved_ipv6s": [ { "droplet": {}, "ip": "str", # Optional. The public IP address of the - reserved IP. It also serves as its identifier. - "locked": bool, # Optional. A boolean value indicating - whether or not the reserved IP has pending actions preventing new ones - from being submitted. - "project_id": "str", # Optional. The UUID of the project to - which the reserved IP currently belongs.:code:`
`:code:`
`Requires - ``project:read`` scope. - "region": { - "available": bool, # This is a boolean value that - represents whether new Droplets can be created in this region. - Required. - "features": [ - "str" # This attribute is set to an array - which contains features available in this region. Required. - ], - "name": "str", # The display name of the region. - This will be a full name that is used in the control panel and other - interfaces. Required. - "sizes": [ - "str" # This attribute is set to an array - which contains the identifying slugs for the sizes available in - this region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used - as a unique identifier for each region. Required. - } + reserved IPv6. It also serves as its identifier. + "region_slug": "str", # Optional. The region that the + reserved IPv6 is reserved to. When you query a reserved IPv6,the + region_slug will be returned. + "reserved_at": "2020-02-20 00:00:00" # Optional. } ] } @@ -172538,7 +183025,7 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_reserved_ips_list_request( + _request = build_reserved_ipv6_list_request( per_page=per_page, page=page, headers=_headers, @@ -172587,18 +183074,13 @@ async def create( self, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create a New Reserved IP. - - On creation, a reserved IP must be either assigned to a Droplet or reserved to a region. + """Create a New Reserved IPv6. + On creation, a reserved IPv6 must be reserved to a region. - * - To create a new reserved IP assigned to a Droplet, send a POST - request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute. - * - To create a new reserved IP reserved to a region, send a POST request to - ``/v2/reserved_ips`` with the ``region`` attribute. 
+ * To create a new reserved IPv6 reserved to a region, send a POST request to + ``/v2/reserved_ipv6`` with the ``region_slug`` attribute. :param body: Required. :type body: JSON @@ -172613,60 +183095,20 @@ async def create( .. code-block:: python # JSON input template you can fill out and use as your body input. - body = {} + body = { + "region_slug": "str" # The slug identifier for the region the reserved IPv6 + will be reserved to. Required. + } - # response body for status code(s): 202 + # response body for status code(s): 201 response == { - "links": { - "actions": [ - { - "href": "str", # Optional. A URL that can be used to - access the action. - "id": 0, # Optional. A unique numeric ID that can be - used to identify and reference an action. - "rel": "str" # Optional. A string specifying the - type of the related action. - } - ], - "droplets": [ - { - "href": "str", # Optional. A URL that can be used to - access the action. - "id": 0, # Optional. A unique numeric ID that can be - used to identify and reference an action. - "rel": "str" # Optional. A string specifying the - type of the related action. - } - ] - }, - "reserved_ip": { - "droplet": {}, - "ip": "str", # Optional. The public IP address of the reserved IP. + "reserved_ipv6": { + "ip": "str", # Optional. The public IP address of the reserved IPv6. It also serves as its identifier. - "locked": bool, # Optional. A boolean value indicating whether or - not the reserved IP has pending actions preventing new ones from being - submitted. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs.:code:`
`:code:`
`Requires - ``project:read`` scope. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. - "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - } + "region_slug": "str", # Optional. The region that the reserved IPv6 + is reserved to. When you query a reserved IPv6,the region_slug will be + returned. + "reserved_at": "2020-02-20 00:00:00" # Optional. } } """ @@ -172676,18 +183118,13 @@ async def create( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create a New Reserved IP. - - On creation, a reserved IP must be either assigned to a Droplet or reserved to a region. + """Create a New Reserved IPv6. + On creation, a reserved IPv6 must be reserved to a region. - * - To create a new reserved IP assigned to a Droplet, send a POST - request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute. - * - To create a new reserved IP reserved to a region, send a POST request to - ``/v2/reserved_ips`` with the ``region`` attribute. + * To create a new reserved IPv6 reserved to a region, send a POST request to + ``/v2/reserved_ipv6`` with the ``region_slug`` attribute. :param body: Required. :type body: IO[bytes] @@ -172701,58 +183138,15 @@ async def create( Example: .. 
code-block:: python - # response body for status code(s): 202 + # response body for status code(s): 201 response == { - "links": { - "actions": [ - { - "href": "str", # Optional. A URL that can be used to - access the action. - "id": 0, # Optional. A unique numeric ID that can be - used to identify and reference an action. - "rel": "str" # Optional. A string specifying the - type of the related action. - } - ], - "droplets": [ - { - "href": "str", # Optional. A URL that can be used to - access the action. - "id": 0, # Optional. A unique numeric ID that can be - used to identify and reference an action. - "rel": "str" # Optional. A string specifying the - type of the related action. - } - ] - }, - "reserved_ip": { - "droplet": {}, - "ip": "str", # Optional. The public IP address of the reserved IP. + "reserved_ipv6": { + "ip": "str", # Optional. The public IP address of the reserved IPv6. It also serves as its identifier. - "locked": bool, # Optional. A boolean value indicating whether or - not the reserved IP has pending actions preventing new ones from being - submitted. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs.:code:`
`:code:`
`Requires - ``project:read`` scope. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. - "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - } + "region_slug": "str", # Optional. The region that the reserved IPv6 + is reserved to. When you query a reserved IPv6,the region_slug will be + returned. + "reserved_at": "2020-02-20 00:00:00" # Optional. } } """ @@ -172760,18 +183154,13 @@ async def create( @distributed_trace_async async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Create a New Reserved IP. - - On creation, a reserved IP must be either assigned to a Droplet or reserved to a region. + """Create a New Reserved IPv6. + On creation, a reserved IPv6 must be reserved to a region. - * - To create a new reserved IP assigned to a Droplet, send a POST - request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute. - * - To create a new reserved IP reserved to a region, send a POST request to - ``/v2/reserved_ips`` with the ``region`` attribute. + * To create a new reserved IPv6 reserved to a region, send a POST request to + ``/v2/reserved_ipv6`` with the ``region_slug`` attribute. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] @@ -172783,60 +183172,20 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: .. 
code-block:: python # JSON input template you can fill out and use as your body input. - body = {} + body = { + "region_slug": "str" # The slug identifier for the region the reserved IPv6 + will be reserved to. Required. + } - # response body for status code(s): 202 + # response body for status code(s): 201 response == { - "links": { - "actions": [ - { - "href": "str", # Optional. A URL that can be used to - access the action. - "id": 0, # Optional. A unique numeric ID that can be - used to identify and reference an action. - "rel": "str" # Optional. A string specifying the - type of the related action. - } - ], - "droplets": [ - { - "href": "str", # Optional. A URL that can be used to - access the action. - "id": 0, # Optional. A unique numeric ID that can be - used to identify and reference an action. - "rel": "str" # Optional. A string specifying the - type of the related action. - } - ] - }, - "reserved_ip": { - "droplet": {}, - "ip": "str", # Optional. The public IP address of the reserved IP. + "reserved_ipv6": { + "ip": "str", # Optional. The public IP address of the reserved IPv6. It also serves as its identifier. - "locked": bool, # Optional. A boolean value indicating whether or - not the reserved IP has pending actions preventing new ones from being - submitted. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs.:code:`
`:code:`
`Requires - ``project:read`` scope. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. - "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - } + "region_slug": "str", # Optional. The region that the reserved IPv6 + is reserved to. When you query a reserved IPv6,the region_slug will be + returned. + "reserved_at": "2020-02-20 00:00:00" # Optional. } } """ @@ -172869,7 +183218,7 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: else: _json = body - _request = build_reserved_ips_create_request( + _request = build_reserved_ipv6_create_request( content_type=content_type, json=_json, content=_content, @@ -172887,7 +183236,7 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [202]: + if response.status_code not in [201]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -172915,15 +183264,15 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get(self, reserved_ip: str, **kwargs: Any) -> JSON: + async def get(self, reserved_ipv6: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Retrieve an 
Existing Reserved IP. + """Retrieve an Existing Reserved IPv6. - To show information about a reserved IP, send a GET request to - ``/v2/reserved_ips/$RESERVED_IP_ADDR``. + To show information about a reserved IPv6, send a GET request to + ``/v2/reserved_ipv6/$RESERVED_IPV6``. - :param reserved_ip: A reserved IP address. Required. - :type reserved_ip: str + :param reserved_ipv6: A reserved IPv6 address. Required. + :type reserved_ipv6: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -172933,34 +183282,15 @@ async def get(self, reserved_ip: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "reserved_ip": { + "reserved_ipv6": { "droplet": {}, - "ip": "str", # Optional. The public IP address of the reserved IP. + "ip": "str", # Optional. The public IP address of the reserved IPv6. It also serves as its identifier. - "locked": bool, # Optional. A boolean value indicating whether or - not the reserved IP has pending actions preventing new ones from being - submitted. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs.:code:`
`:code:`
`Requires - ``project:read`` scope. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. - "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - } + "region_slug": "str", # Optional. The region that the reserved IPv6 + is reserved to. When you query a reserved IPv6,the region_slug will be + returned. + "reserved_at": "2020-02-20 00:00:00" # Optional. The date and time + when the reserved IPv6 was reserved. } } # response body for status code(s): 404 @@ -172993,8 +183323,8 @@ async def get(self, reserved_ip: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_reserved_ips_get_request( - reserved_ip=reserved_ip, + _request = build_reserved_ipv6_get_request( + reserved_ipv6=reserved_ipv6, headers=_headers, params=_params, ) @@ -173054,18 +183384,18 @@ async def get(self, reserved_ip: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]: + async def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]: # pylint: disable=line-too-long - """Delete a Reserved IP. + """Delete a Reserved IPv6. To delete a reserved IP and remove it from your account, send a DELETE request - to ``/v2/reserved_ips/$RESERVED_IP_ADDR``. + to ``/v2/reserved_ipv6/$RESERVED_IPV6``. 
A successful request will receive a 204 status code with no body in response. This indicates that the request was processed successfully. - :param reserved_ip: A reserved IP address. Required. - :type reserved_ip: str + :param reserved_ipv6: A reserved IPv6 address. Required. + :type reserved_ipv6: str :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -173073,7 +183403,7 @@ async def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]: Example: .. code-block:: python - # response body for status code(s): 404 + # response body for status code(s): 404, 422 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -173103,8 +183433,8 @@ async def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]: cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_reserved_ips_delete_request( - reserved_ip=reserved_ip, + _request = build_reserved_ipv6_delete_request( + reserved_ipv6=reserved_ipv6, headers=_headers, params=_params, ) @@ -173119,7 +183449,7 @@ async def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]: response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [204, 404, 422]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -173154,20 +183484,36 @@ async def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]: else: deserialized = None + if response.status_code == 422: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = 
self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized # type: ignore -class ReservedIPsActionsOperations: +class ReservedIPv6ActionsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~pydo.aio.GeneratedClient`'s - :attr:`reserved_ips_actions` attribute. + :attr:`reserved_ipv6_actions` attribute. """ def __init__(self, *args, **kwargs) -> None: @@ -173179,16 +183525,40 @@ def __init__(self, *args, **kwargs) -> None: input_args.pop(0) if input_args else kwargs.pop("deserializer") ) - @distributed_trace_async - async def list(self, reserved_ip: str, **kwargs: Any) -> JSON: + @overload + async def post( + self, + reserved_ipv6: str, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """List All Actions for a Reserved IP. + """Initiate a Reserved IPv6 Action. - To retrieve all actions that have been executed on a reserved IP, send a GET request to - ``/v2/reserved_ips/$RESERVED_IP/actions``. + To initiate an action on a reserved IPv6 send a POST request to + ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request, + set the ``type`` attribute to on of the supported action types: - :param reserved_ip: A reserved IP address. Required. - :type reserved_ip: str + .. list-table:: + :header-rows: 1 + + * - Action + - Details + * - ``assign`` + - Assigns a reserved IPv6 to a Droplet + * - ``unassign`` + - Unassign a reserved IPv6 from a Droplet. + + :param reserved_ipv6: A reserved IPv6 address. Required. + :type reserved_ipv6: str + :param body: The ``type`` attribute set in the request body will specify the action that + will be taken on the reserved IPv6. 
Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -173196,57 +183566,50 @@ async def list(self, reserved_ip: str, **kwargs: Any) -> JSON: Example: .. code-block:: python - # response body for status code(s): 200 + # JSON input template you can fill out and use as your body input. + body = {} + + # response body for status code(s): 201 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "actions": [ - { - "completed_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the action was completed. - "id": 0, # Optional. A unique numeric ID that can be used to - identify and reference an action. - "region": { - "available": bool, # This is a boolean value that - represents whether new Droplets can be created in this region. - Required. - "features": [ - "str" # This attribute is set to an array - which contains features available in this region. Required. - ], - "name": "str", # The display name of the region. - This will be a full name that is used in the control panel and other - interfaces. Required. - "sizes": [ - "str" # This attribute is set to an array - which contains the identifying slugs for the sizes available in - this region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used - as a unique identifier for each region. Required. - }, - "region_slug": "str", # Optional. A human-readable string - that is used as a unique identifier for each region. - "resource_id": 0, # Optional. A unique identifier for the - resource that the action is associated with. - "resource_type": "str", # Optional. The type of resource - that the action is associated with. 
- "started_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the action was initiated. - "status": "in-progress", # Optional. Default value is - "in-progress". The current status of the action. This can be - "in-progress", "completed", or "errored". Known values are: - "in-progress", "completed", and "errored". - "type": "str" # Optional. This is the type of action that - the object represents. For example, this could be "transfer" to represent - the state of an image transfer action. - } - ], - "links": { - "pages": {} + "action": { + "completed_at": "2020-02-20 00:00:00", # Optional. A time value + given in ISO8601 combined date and time format that represents when the + action was completed. + "id": 0, # Optional. A unique numeric ID that can be used to + identify and reference an action. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + }, + "region_slug": "str", # Optional. A human-readable string that is + used as a unique identifier for each region. + "resource_id": 0, # Optional. A unique identifier for the resource + that the action is associated with. + "resource_type": "str", # Optional. The type of resource that the + action is associated with. + "started_at": "2020-02-20 00:00:00", # Optional. 
A time value given + in ISO8601 combined date and time format that represents when the action was + initiated. + "status": "in-progress", # Optional. Default value is "in-progress". + The current status of the action. This can be "in-progress", "completed", or + "errored". Known values are: "in-progress", "completed", and "errored". + "type": "str" # Optional. This is the type of action that the object + represents. For example, this could be "transfer" to represent the state of + an image transfer action. } } # response body for status code(s): 404 @@ -173261,98 +183624,21 @@ async def list(self, reserved_ip: str, **kwargs: Any) -> JSON: tickets to help identify the issue. } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_reserved_ips_actions_list_request( - reserved_ip=reserved_ip, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - if _stream: - await response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = 
self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore @overload async def post( self, - reserved_ip: str, - body: Optional[JSON] = None, + reserved_ipv6: str, + body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Initiate a Reserved IP Action. + """Initiate a Reserved IPv6 Action. - To initiate an action on a reserved IP send a POST request to - ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request, + To initiate an action on a reserved IPv6 send a POST request to + ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request, set the ``type`` attribute to on of the supported action types: .. list-table:: @@ -173361,16 +183647,16 @@ async def post( * - Action - Details * - ``assign`` - - Assigns a reserved IP to a Droplet + - Assigns a reserved IPv6 to a Droplet * - ``unassign`` - - Unassign a reserved IP from a Droplet. + - Unassign a reserved IPv6 from a Droplet. 
- :param reserved_ip: A reserved IP address. Required. - :type reserved_ip: str + :param reserved_ipv6: A reserved IPv6 address. Required. + :type reserved_ipv6: str :param body: The ``type`` attribute set in the request body will specify the action that - will be taken on the reserved IP. Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + will be taken on the reserved IPv6. Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: JSON object @@ -173380,9 +183666,6 @@ async def post( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = {} - # response body for status code(s): 201 response == { "action": { @@ -173391,8 +183674,6 @@ async def post( action was completed. "id": 0, # Optional. A unique numeric ID that can be used to identify and reference an action. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs. "region": { "available": bool, # This is a boolean value that represents whether new Droplets can be created in this region. Required. @@ -173441,20 +183722,18 @@ async def post( } """ - @overload + @distributed_trace_async async def post( self, - reserved_ip: str, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", + reserved_ipv6: str, + body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Initiate a Reserved IP Action. + """Initiate a Reserved IPv6 Action. - To initiate an action on a reserved IP send a POST request to - ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request, + To initiate an action on a reserved IPv6 send a POST request to + ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. 
In the JSON body to the request, set the ``type`` attribute to on of the supported action types: .. list-table:: @@ -173463,18 +183742,16 @@ async def post( * - Action - Details * - ``assign`` - - Assigns a reserved IP to a Droplet + - Assigns a reserved IPv6 to a Droplet * - ``unassign`` - - Unassign a reserved IP from a Droplet. + - Unassign a reserved IPv6 from a Droplet. - :param reserved_ip: A reserved IP address. Required. - :type reserved_ip: str + :param reserved_ipv6: A reserved IPv6 address. Required. + :type reserved_ipv6: str :param body: The ``type`` attribute set in the request body will specify the action that - will be taken on the reserved IP. Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str + will be taken on the reserved IPv6. Is either a JSON type or a IO[bytes] type. Default value + is None. + :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -173482,6 +183759,9 @@ async def post( Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = {} + # response body for status code(s): 201 response == { "action": { @@ -173490,8 +183770,6 @@ async def post( action was completed. "id": 0, # Optional. A unique numeric ID that can be used to identify and reference an action. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs. "region": { "available": bool, # This is a boolean value that represents whether new Droplets can be created in this region. Required. @@ -173527,7 +183805,215 @@ async def post( an image transfer action. } } - # response body for status code(s): 404 + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. 
For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_reserved_ipv6_actions_post_request( + reserved_ipv6=reserved_ipv6, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [201, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore 
+ raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 201: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + +class ByoipPrefixesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~pydo.aio.GeneratedClient`'s + :attr:`byoip_prefixes` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) + + @overload + async def create( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a BYOIP Prefix. 
+ + To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``. + + A successful request will initiate the process of bringing your BYOIP Prefix into your account. + The response will include the details of the created prefix, including its UUID and status. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "prefix": "str", # The IP prefix in CIDR notation to bring. Required. + "region": "str", # The region where the prefix will be created. Required. + "signature": "str" # The signature hash for the prefix creation request. + Required. + } + + # response body for status code(s): 202 + response == { + "region": "str", # Optional. The region where the prefix is created. + "status": "str", # Optional. The status of the BYOIP prefix. + "uuid": "str" # Optional. The unique identifier for the BYOIP prefix. + } + # response body for status code(s): 422 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def create( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a BYOIP Prefix. 
+ + To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``. + + A successful request will initiate the process of bringing your BYOIP Prefix into your account. + The response will include the details of the created prefix, including its UUID and status. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 202 + response == { + "region": "str", # Optional. The region where the prefix is created. + "status": "str", # Optional. The status of the BYOIP prefix. + "uuid": "str" # Optional. The unique identifier for the BYOIP prefix. + } + # response body for status code(s): 422 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -173541,34 +184027,16 @@ async def post( """ @distributed_trace_async - async def post( - self, - reserved_ip: str, - body: Optional[Union[JSON, IO[bytes]]] = None, - **kwargs: Any - ) -> JSON: + async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Initiate a Reserved IP Action. - - To initiate an action on a reserved IP send a POST request to - ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request, - set the ``type`` attribute to on of the supported action types: + """Create a BYOIP Prefix. - .. list-table:: - :header-rows: 1 + To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``. - * - Action - - Details - * - ``assign`` - - Assigns a reserved IP to a Droplet - * - ``unassign`` - - Unassign a reserved IP from a Droplet. 
+ A successful request will initiate the process of bringing your BYOIP Prefix into your account. + The response will include the details of the created prefix, including its UUID and status. - :param reserved_ip: A reserved IP address. Required. - :type reserved_ip: str - :param body: The ``type`` attribute set in the request body will specify the action that - will be taken on the reserved IP. Is either a JSON type or a IO[bytes] type. Default value is - None. + :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON @@ -173578,54 +184046,20 @@ async def post( .. code-block:: python # JSON input template you can fill out and use as your body input. - body = {} + body = { + "prefix": "str", # The IP prefix in CIDR notation to bring. Required. + "region": "str", # The region where the prefix will be created. Required. + "signature": "str" # The signature hash for the prefix creation request. + Required. + } - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "action": { - "completed_at": "2020-02-20 00:00:00", # Optional. A time value - given in ISO8601 combined date and time format that represents when the - action was completed. - "id": 0, # Optional. A unique numeric ID that can be used to - identify and reference an action. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. 
- "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - }, - "region_slug": "str", # Optional. A human-readable string that is - used as a unique identifier for each region. - "resource_id": 0, # Optional. A unique identifier for the resource - that the action is associated with. - "resource_type": "str", # Optional. The type of resource that the - action is associated with. - "started_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the action was - initiated. - "status": "in-progress", # Optional. Default value is "in-progress". - The current status of the action. This can be "in-progress", "completed", or - "errored". Known values are: "in-progress", "completed", and "errored". - "type": "str" # Optional. This is the type of action that the object - represents. For example, this could be "transfer" to represent the state of - an image transfer action. - } + "region": "str", # Optional. The region where the prefix is created. + "status": "str", # Optional. The status of the BYOIP prefix. + "uuid": "str" # Optional. The unique identifier for the BYOIP prefix. } - # response body for status code(s): 404 + # response body for status code(s): 422 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -173664,13 +184098,9 @@ async def post( if isinstance(body, (IOBase, bytes)): _content = body else: - if body is not None: - _json = body - else: - _json = None + _json = body - _request = build_reserved_ips_actions_post_request( - reserved_ip=reserved_ip, + _request = build_byoip_prefixes_create_request( content_type=content_type, json=_json, content=_content, @@ -173688,14 +184118,14 @@ async def post( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [202, 422]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 201: + if response.status_code == 202: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -173711,7 +184141,7 @@ async def post( else: deserialized = None - if response.status_code == 404: + if response.status_code == 422: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -173733,18 +184163,135 @@ async def post( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON: + async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: + """List BYOIP Prefixes. + + To list all BYOIP prefixes, send a GET request to ``/v2/byoip_prefixes``. + A successful response will return a list of all BYOIP prefixes associated with the account. + + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. 
+ :paramtype page: int + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "byoip_prefixes": [ + { + "advertised": bool, # Optional. Whether the BYOIP prefix is + being advertised. + "failure_reason": "str", # Optional. Reason for failure, if + applicable. + "locked": bool, # Optional. Whether the BYOIP prefix is + locked. + "name": "str", # Optional. Name of the BYOIP prefix. + "prefix": "str", # Optional. The IP prefix in CIDR notation. + "project_id": "str", # Optional. The ID of the project + associated with the BYOIP prefix. + "region": "str", # Optional. Region where the BYOIP prefix + is located. + "status": "str", # Optional. Status of the BYOIP prefix. + "uuid": "str", # Optional. Unique identifier for the BYOIP + prefix. + "validations": [ + { + "name": "str", # Optional. Name of the + validation. + "note": "str", # Optional. Additional notes + or details about the validation. + "status": "str" # Optional. Status of the + validation. 
+ } + ] + } + ], + "links": { + "pages": {} + } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_byoip_prefixes_list_request( + per_page=per_page, + page=page, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - 
"""Retrieve an Existing Reserved IP Action. + """Get a BYOIP Prefix. - To retrieve the status of a reserved IP action, send a GET request to - ``/v2/reserved_ips/$RESERVED_IP/actions/$ACTION_ID``. + To get a BYOIP prefix, send a GET request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. - :param reserved_ip: A reserved IP address. Required. - :type reserved_ip: str - :param action_id: A unique numeric ID that can be used to identify and reference an action. - Required. - :type action_id: int + A successful response will return the details of the specified BYOIP prefix. + + :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required. + :type byoip_prefix_uuid: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -173754,50 +184301,32 @@ async def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "action": { - "completed_at": "2020-02-20 00:00:00", # Optional. A time value - given in ISO8601 combined date and time format that represents when the - action was completed. - "id": 0, # Optional. A unique numeric ID that can be used to - identify and reference an action. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. - "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. 
- ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - }, - "region_slug": "str", # Optional. A human-readable string that is - used as a unique identifier for each region. - "resource_id": 0, # Optional. A unique identifier for the resource - that the action is associated with. - "resource_type": "str", # Optional. The type of resource that the - action is associated with. - "started_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the action was - initiated. - "status": "in-progress", # Optional. Default value is "in-progress". - The current status of the action. This can be "in-progress", "completed", or - "errored". Known values are: "in-progress", "completed", and "errored". - "type": "str" # Optional. This is the type of action that the object - represents. For example, this could be "transfer" to represent the state of - an image transfer action. + "byoip_prefix": { + "advertised": bool, # Optional. Whether the BYOIP prefix is being + advertised. + "failure_reason": "str", # Optional. Reason for failure, if + applicable. + "locked": bool, # Optional. Whether the BYOIP prefix is locked. + "name": "str", # Optional. Name of the BYOIP prefix. + "prefix": "str", # Optional. The IP prefix in CIDR notation. + "project_id": "str", # Optional. The ID of the project associated + with the BYOIP prefix. + "region": "str", # Optional. Region where the BYOIP prefix is + located. + "status": "str", # Optional. Status of the BYOIP prefix. + "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. + "validations": [ + { + "name": "str", # Optional. Name of the validation. + "note": "str", # Optional. Additional notes or + details about the validation. + "status": "str" # Optional. Status of the + validation. 
+ } + ] } } - # response body for status code(s): 404 + # response body for status code(s): 404, 422 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -173827,9 +184356,8 @@ async def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_reserved_ips_actions_get_request( - reserved_ip=reserved_ip, - action_id=action_id, + _request = build_byoip_prefixes_get_request( + byoip_prefix_uuid=byoip_prefix_uuid, headers=_headers, params=_params, ) @@ -173844,7 +184372,7 @@ async def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [200, 404, 422]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -173883,69 +184411,57 @@ async def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON: else: deserialized = None + if response.status_code == 422: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore return cast(JSON, deserialized) # type: ignore - -class ReservedIPv6Operations: - """ - .. warning:: - **DO NOT** instantiate this class directly. 
- - Instead, you should access the following operations through - :class:`~pydo.aio.GeneratedClient`'s - :attr:`reserved_ipv6` attribute. - """ - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = ( - input_args.pop(0) if input_args else kwargs.pop("deserializer") - ) - @distributed_trace_async - async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: + async def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]: # pylint: disable=line-too-long - """List All Reserved IPv6s. + """Delete a BYOIP Prefix. - To list all of the reserved IPv6s available on your account, send a GET request to - ``/v2/reserved_ipv6``. + To delete a BYOIP prefix and remove it from your account, send a DELETE request + to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int - :return: JSON object - :rtype: JSON + A successful request will receive a 202 status code with no body in response. + This indicates that the request was accepted and the prefix is being deleted. + + :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required. + :type byoip_prefix_uuid: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 + # response body for status code(s): 404, 422 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. 
- }, - "links": { - "pages": {} - }, - "reserved_ipv6s": [ - { - "droplet": {}, - "ip": "str", # Optional. The public IP address of the - reserved IPv6. It also serves as its identifier. - "region_slug": "str", # Optional. The region that the - reserved IPv6 is reserved to. When you query a reserved IPv6,the - region_slug will be returned. - "reserved_at": "2020-02-20 00:00:00" # Optional. - } - ] + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -173964,11 +184480,10 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_reserved_ipv6_list_request( - per_page=per_page, - page=page, + _request = build_byoip_prefixes_delete_request( + byoip_prefix_uuid=byoip_prefix_uuid, headers=_headers, params=_params, ) @@ -173983,46 +184498,81 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [202, 404, 422]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - 
response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + if response.status_code == 202: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 422: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: 
ignore @overload - async def create( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + async def patch( + self, + byoip_prefix_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create a New Reserved IPv6. - - On creation, a reserved IPv6 must be reserved to a region. + """Update a BYOIP Prefix. + To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. - * To create a new reserved IPv6 reserved to a region, send a POST request to - ``/v2/reserved_ipv6`` with the ``region_slug`` attribute. + Currently, you can update the advertisement status of the prefix. + The response will include the updated details of the prefix. + :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required. + :type byoip_prefix_uuid: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -174037,36 +184587,68 @@ async def create( # JSON input template you can fill out and use as your body input. body = { - "region_slug": "str" # The slug identifier for the region the reserved IPv6 - will be reserved to. Required. + "advertise": bool # Optional. Whether the BYOIP prefix should be advertised. } - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "reserved_ipv6": { - "ip": "str", # Optional. The public IP address of the reserved IPv6. - It also serves as its identifier. - "region_slug": "str", # Optional. The region that the reserved IPv6 - is reserved to. When you query a reserved IPv6,the region_slug will be - returned. - "reserved_at": "2020-02-20 00:00:00" # Optional. + "byoip_prefix": { + "advertised": bool, # Optional. Whether the BYOIP prefix is being + advertised. + "failure_reason": "str", # Optional. Reason for failure, if + applicable. + "locked": bool, # Optional. 
Whether the BYOIP prefix is locked. + "name": "str", # Optional. Name of the BYOIP prefix. + "prefix": "str", # Optional. The IP prefix in CIDR notation. + "project_id": "str", # Optional. The ID of the project associated + with the BYOIP prefix. + "region": "str", # Optional. Region where the BYOIP prefix is + located. + "status": "str", # Optional. Status of the BYOIP prefix. + "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. + "validations": [ + { + "name": "str", # Optional. Name of the validation. + "note": "str", # Optional. Additional notes or + details about the validation. + "status": "str" # Optional. Status of the + validation. + } + ] } } + # response body for status code(s): 404, 422 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } """ @overload - async def create( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + async def patch( + self, + byoip_prefix_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create a New Reserved IPv6. - - On creation, a reserved IPv6 must be reserved to a region. + """Update a BYOIP Prefix. + To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. - * To create a new reserved IPv6 reserved to a region, send a POST request to - ``/v2/reserved_ipv6`` with the ``region_slug`` attribute. + Currently, you can update the advertisement status of the prefix. 
+ The response will include the updated details of the prefix. + :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required. + :type byoip_prefix_uuid: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -174079,30 +184661,60 @@ async def create( Example: .. code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "reserved_ipv6": { - "ip": "str", # Optional. The public IP address of the reserved IPv6. - It also serves as its identifier. - "region_slug": "str", # Optional. The region that the reserved IPv6 - is reserved to. When you query a reserved IPv6,the region_slug will be - returned. - "reserved_at": "2020-02-20 00:00:00" # Optional. + "byoip_prefix": { + "advertised": bool, # Optional. Whether the BYOIP prefix is being + advertised. + "failure_reason": "str", # Optional. Reason for failure, if + applicable. + "locked": bool, # Optional. Whether the BYOIP prefix is locked. + "name": "str", # Optional. Name of the BYOIP prefix. + "prefix": "str", # Optional. The IP prefix in CIDR notation. + "project_id": "str", # Optional. The ID of the project associated + with the BYOIP prefix. + "region": "str", # Optional. Region where the BYOIP prefix is + located. + "status": "str", # Optional. Status of the BYOIP prefix. + "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. + "validations": [ + { + "name": "str", # Optional. Name of the validation. + "note": "str", # Optional. Additional notes or + details about the validation. + "status": "str" # Optional. Status of the + validation. + } + ] } } + # response body for status code(s): 404, 422 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. 
+ "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } """ @distributed_trace_async - async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: + async def patch( + self, byoip_prefix_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Create a New Reserved IPv6. - - On creation, a reserved IPv6 must be reserved to a region. + """Update a BYOIP Prefix. + To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. - * To create a new reserved IPv6 reserved to a region, send a POST request to - ``/v2/reserved_ipv6`` with the ``region_slug`` attribute. + Currently, you can update the advertisement status of the prefix. + The response will include the updated details of the prefix. + :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required. + :type byoip_prefix_uuid: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object @@ -174114,21 +184726,47 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # JSON input template you can fill out and use as your body input. body = { - "region_slug": "str" # The slug identifier for the region the reserved IPv6 - will be reserved to. Required. + "advertise": bool # Optional. Whether the BYOIP prefix should be advertised. } - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "reserved_ipv6": { - "ip": "str", # Optional. The public IP address of the reserved IPv6. - It also serves as its identifier. - "region_slug": "str", # Optional. The region that the reserved IPv6 - is reserved to. 
When you query a reserved IPv6,the region_slug will be - returned. - "reserved_at": "2020-02-20 00:00:00" # Optional. + "byoip_prefix": { + "advertised": bool, # Optional. Whether the BYOIP prefix is being + advertised. + "failure_reason": "str", # Optional. Reason for failure, if + applicable. + "locked": bool, # Optional. Whether the BYOIP prefix is locked. + "name": "str", # Optional. Name of the BYOIP prefix. + "prefix": "str", # Optional. The IP prefix in CIDR notation. + "project_id": "str", # Optional. The ID of the project associated + with the BYOIP prefix. + "region": "str", # Optional. Region where the BYOIP prefix is + located. + "status": "str", # Optional. Status of the BYOIP prefix. + "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. + "validations": [ + { + "name": "str", # Optional. Name of the validation. + "note": "str", # Optional. Additional notes or + details about the validation. + "status": "str" # Optional. Status of the + validation. + } + ] } } + # response body for status code(s): 404, 422 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { 404: ResourceNotFoundError, @@ -174159,7 +184797,8 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: else: _json = body - _request = build_reserved_ipv6_create_request( + _request = build_byoip_prefixes_patch_request( + byoip_prefix_uuid=byoip_prefix_uuid, content_type=content_type, json=_json, content=_content, @@ -174177,27 +184816,60 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [201]: + if response.status_code not in [202, 404, 422]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + if response.status_code == 202: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( 
+ "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 422: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore @@ -174205,15 +184877,29 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get(self, reserved_ipv6: str, **kwargs: Any) -> JSON: + async def list_resources( + self, + byoip_prefix_uuid: str, + *, + per_page: int = 20, + page: int = 1, + **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Retrieve an Existing Reserved IPv6. + """List BYOIP Prefix Resources. - To show information about a reserved IPv6, send a GET request to - ``/v2/reserved_ipv6/$RESERVED_IPV6``. + To list resources associated with BYOIP prefixes, send a GET request to + ``/v2/byoip_prefixes/{byoip_prefix_uuid}/ips``. - :param reserved_ipv6: A reserved IPv6 address. Required. - :type reserved_ipv6: str + A successful response will return a list of resources associated with the specified BYOIP + prefix. + + :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required. + :type byoip_prefix_uuid: str + :keyword per_page: Number of items returned per page. Default value is 20. 
+ :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -174223,15 +184909,23 @@ async def get(self, reserved_ipv6: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "reserved_ipv6": { - "droplet": {}, - "ip": "str", # Optional. The public IP address of the reserved IPv6. - It also serves as its identifier. - "region_slug": "str", # Optional. The region that the reserved IPv6 - is reserved to. When you query a reserved IPv6,the region_slug will be - returned. - "reserved_at": "2020-02-20 00:00:00" # Optional. The date and time - when the reserved IPv6 was reserved. + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "ips": [ + { + "assigned_at": "2020-02-20 00:00:00", # Optional. Time when + the allocation was assigned. + "byoip": "str", # Optional. The BYOIP prefix UUID. + "id": 0, # Optional. Unique identifier for the allocation. + "region": "str", # Optional. Region where the allocation is + made. + "resource": "str" # Optional. The resource associated with + the allocation. + } + ], + "links": { + "pages": {} } } # response body for status code(s): 404 @@ -174264,8 +184958,10 @@ async def get(self, reserved_ipv6: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_reserved_ipv6_get_request( - reserved_ipv6=reserved_ipv6, + _request = build_byoip_prefixes_list_resources_request( + byoip_prefix_uuid=byoip_prefix_uuid, + per_page=per_page, + page=page, headers=_headers, params=_params, ) @@ -174324,27 +185020,96 @@ async def get(self, reserved_ipv6: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore + +class SecurityOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. 
+ + Instead, you should access the following operations through + :class:`~pydo.aio.GeneratedClient`'s + :attr:`security` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) + @distributed_trace_async - async def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]: + async def list_scans( + self, *, per_page: int = 20, page: int = 1, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Delete a Reserved IPv6. + """List Scans. - To delete a reserved IP and remove it from your account, send a DELETE request - to ``/v2/reserved_ipv6/$RESERVED_IPV6``. - - A successful request will receive a 204 status code with no body in response. - This indicates that the request was processed successfully. + To list all CSPM scans, send a GET request to ``/v2/security/scans``. - :param reserved_ipv6: A reserved IPv6 address. Required. - :type reserved_ipv6: str - :return: JSON object or None - :rtype: JSON or None + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 404, 422 + # response body for status code(s): 200 + response == { + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "links": { + "pages": {} + }, + "scans": [ + { + "created_at": "2020-02-20 00:00:00", # Optional. When scan + was created. 
+ "findings": [ + { + "affected_resources_count": 0, # Optional. + The number of affected resources for the finding. + "business_impact": "str", # Optional. A + description of the business impact of the finding. + "details": "str", # Optional. A description + of the risk associated with the finding. + "found_at": "2020-02-20 00:00:00", # + Optional. When the finding was discovered. + "mitigation_steps": [ + { + "description": "str", # + Optional. description. + "step": 0, # Optional. step. + "title": "str" # Optional. + title. + } + ], + "name": "str", # Optional. The name of the + rule that triggered the finding. + "rule_uuid": "str", # Optional. The unique + identifier for the rule that triggered the finding. + "severity": "str", # Optional. The severity + of the finding. Known values are: "CRITICAL", "HIGH", "MEDIUM", + and "LOW". + "technical_details": "str" # Optional. A + description of the technical details related to the finding. + } + ], + "id": "str", # Optional. The unique identifier for the scan. + "status": "str" # Optional. The status of the scan. Known + values are: "IN_PROGRESS", "COMPLETED", "FAILED", "CSPM_NOT_ENABLED", and + "SCAN_NOT_RUN". + } + ] + } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -174372,10 +185137,11 @@ async def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_reserved_ipv6_delete_request( - reserved_ipv6=reserved_ipv6, + _request = build_security_list_scans_request( + per_page=per_page, + page=page, headers=_headers, params=_params, ) @@ -174390,26 +185156,14 @@ async def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]: response = pipeline_response.http_response - if response.status_code not in [204, 404, 422]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -174425,7 +185179,7 @@ async def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]: else: deserialized = None - if response.status_code == 422: + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -174442,64 +185196,17 @@ async def delete(self, reserved_ipv6: str, 
**kwargs: Any) -> Optional[JSON]: deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - -class ReservedIPv6ActionsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~pydo.aio.GeneratedClient`'s - :attr:`reserved_ipv6_actions` attribute. - """ + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = ( - input_args.pop(0) if input_args else kwargs.pop("deserializer") - ) + return cast(JSON, deserialized) # type: ignore - @overload - async def post( - self, - reserved_ipv6: str, - body: Optional[JSON] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: + @distributed_trace_async + async def create_scan(self, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Initiate a Reserved IPv6 Action. - - To initiate an action on a reserved IPv6 send a POST request to - ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request, - set the ``type`` attribute to on of the supported action types: - - .. list-table:: - :header-rows: 1 + """Create Scan. - * - Action - - Details - * - ``assign`` - - Assigns a reserved IPv6 to a Droplet - * - ``unassign`` - - Unassign a reserved IPv6 from a Droplet. + To create a CSPM scan, send a POST request to ``/v2/security/scans``. - :param reserved_ipv6: A reserved IPv6 address. Required. 
- :type reserved_ipv6: str - :param body: The ``type`` attribute set in the request body will specify the action that - will be taken on the reserved IPv6. Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -174507,53 +185214,46 @@ async def post( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = {} - # response body for status code(s): 201 response == { - "action": { - "completed_at": "2020-02-20 00:00:00", # Optional. A time value - given in ISO8601 combined date and time format that represents when the - action was completed. - "id": 0, # Optional. A unique numeric ID that can be used to - identify and reference an action. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. - "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - }, - "region_slug": "str", # Optional. A human-readable string that is - used as a unique identifier for each region. - "resource_id": 0, # Optional. A unique identifier for the resource - that the action is associated with. - "resource_type": "str", # Optional. The type of resource that the - action is associated with. 
- "started_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the action was - initiated. - "status": "in-progress", # Optional. Default value is "in-progress". - The current status of the action. This can be "in-progress", "completed", or - "errored". Known values are: "in-progress", "completed", and "errored". - "type": "str" # Optional. This is the type of action that the object - represents. For example, this could be "transfer" to represent the state of - an image transfer action. + "scan": { + "created_at": "2020-02-20 00:00:00", # Optional. When scan was + created. + "findings": [ + { + "affected_resources_count": 0, # Optional. The + number of affected resources for the finding. + "business_impact": "str", # Optional. A description + of the business impact of the finding. + "details": "str", # Optional. A description of the + risk associated with the finding. + "found_at": "2020-02-20 00:00:00", # Optional. When + the finding was discovered. + "mitigation_steps": [ + { + "description": "str", # Optional. + description. + "step": 0, # Optional. step. + "title": "str" # Optional. title. + } + ], + "name": "str", # Optional. The name of the rule that + triggered the finding. + "rule_uuid": "str", # Optional. The unique + identifier for the rule that triggered the finding. + "severity": "str", # Optional. The severity of the + finding. Known values are: "CRITICAL", "HIGH", "MEDIUM", and "LOW". + "technical_details": "str" # Optional. A description + of the technical details related to the finding. + } + ], + "id": "str", # Optional. The unique identifier for the scan. + "status": "str" # Optional. The status of the scan. Known values + are: "IN_PROGRESS", "COMPLETED", "FAILED", "CSPM_NOT_ENABLED", and + "SCAN_NOT_RUN". 
} } - # response body for status code(s): 404 + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -174565,41 +185265,126 @@ async def post( tickets to help identify the issue. } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_security_create_scan_request( + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [201, 400, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 201: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - @overload - async def post( + if response.content: + deserialized = 
response.json() + else: + deserialized = None + + if response.status_code == 400: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace_async + async def get_scan( self, - reserved_ipv6: str, - body: Optional[IO[bytes]] = None, + scan_id: str, *, - content_type: str = "application/json", + severity: Optional[str] = None, + per_page: int = 20, + page: int = 1, + type: Optional[str] = None, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Initiate a Reserved IPv6 Action. - - To initiate an action on a reserved IPv6 send a POST request to - ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request, - set the ``type`` attribute to on of the supported action types: - - .. list-table:: - :header-rows: 1 + """Get Scan. - * - Action - - Details - * - ``assign`` - - Assigns a reserved IPv6 to a Droplet - * - ``unassign`` - - Unassign a reserved IPv6 from a Droplet. 
+ To get a CSPM scan by ID, send a GET request to ``/v2/security/scans/{scan_id}``. - :param reserved_ipv6: A reserved IPv6 address. Required. - :type reserved_ipv6: str - :param body: The ``type`` attribute set in the request body will specify the action that - will be taken on the reserved IPv6. Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str + :param scan_id: The scan UUID. Required. + :type scan_id: str + :keyword severity: The finding severity level to include. Known values are: "LOW", "MEDIUM", + "HIGH", and "CRITICAL". Default value is None. + :paramtype severity: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :keyword type: The finding type to include. Default value is None. + :paramtype type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -174607,47 +185392,43 @@ async def post( Example: .. code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "action": { - "completed_at": "2020-02-20 00:00:00", # Optional. A time value - given in ISO8601 combined date and time format that represents when the - action was completed. - "id": 0, # Optional. A unique numeric ID that can be used to - identify and reference an action. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. 
- "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - }, - "region_slug": "str", # Optional. A human-readable string that is - used as a unique identifier for each region. - "resource_id": 0, # Optional. A unique identifier for the resource - that the action is associated with. - "resource_type": "str", # Optional. The type of resource that the - action is associated with. - "started_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the action was - initiated. - "status": "in-progress", # Optional. Default value is "in-progress". - The current status of the action. This can be "in-progress", "completed", or - "errored". Known values are: "in-progress", "completed", and "errored". - "type": "str" # Optional. This is the type of action that the object - represents. For example, this could be "transfer" to represent the state of - an image transfer action. + "scan": { + "created_at": "2020-02-20 00:00:00", # Optional. When scan was + created. + "findings": [ + { + "affected_resources_count": 0, # Optional. The + number of affected resources for the finding. + "business_impact": "str", # Optional. A description + of the business impact of the finding. + "details": "str", # Optional. A description of the + risk associated with the finding. + "found_at": "2020-02-20 00:00:00", # Optional. When + the finding was discovered. + "mitigation_steps": [ + { + "description": "str", # Optional. + description. + "step": 0, # Optional. step. + "title": "str" # Optional. title. + } + ], + "name": "str", # Optional. The name of the rule that + triggered the finding. + "rule_uuid": "str", # Optional. The unique + identifier for the rule that triggered the finding. 
+ "severity": "str", # Optional. The severity of the + finding. Known values are: "CRITICAL", "HIGH", "MEDIUM", and "LOW". + "technical_details": "str" # Optional. A description + of the technical details related to the finding. + } + ], + "id": "str", # Optional. The unique identifier for the scan. + "status": "str" # Optional. The status of the scan. Known values + are: "IN_PROGRESS", "COMPLETED", "FAILED", "CSPM_NOT_ENABLED", and + "SCAN_NOT_RUN". } } # response body for status code(s): 404 @@ -174662,37 +185443,112 @@ async def post( tickets to help identify the issue. } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_security_get_scan_request( + scan_id=scan_id, + severity=severity, + per_page=per_page, + page=page, + type=type, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", 
response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def post( + async def get_latest_scan( self, - reserved_ipv6: str, - body: Optional[Union[JSON, IO[bytes]]] = None, + *, + per_page: int = 20, + page: int = 1, + severity: Optional[str] = None, + type: Optional[str] = None, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Initiate a Reserved IPv6 Action. - - To initiate an action on a reserved IPv6 send a POST request to - ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request, - set the ``type`` attribute to on of the supported action types: - - .. list-table:: - :header-rows: 1 + """Get Latest Scan. - * - Action - - Details - * - ``assign`` - - Assigns a reserved IPv6 to a Droplet - * - ``unassign`` - - Unassign a reserved IPv6 from a Droplet. + To get the latest CSPM scan, send a GET request to ``/v2/security/scans/latest``. - :param reserved_ipv6: A reserved IPv6 address. Required. 
- :type reserved_ipv6: str - :param body: The ``type`` attribute set in the request body will specify the action that - will be taken on the reserved IPv6. Is either a JSON type or a IO[bytes] type. Default value - is None. - :type body: JSON or IO[bytes] + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :keyword severity: The finding severity level to include. Known values are: "LOW", "MEDIUM", + "HIGH", and "CRITICAL". Default value is None. + :paramtype severity: str + :keyword type: The finding type to include. Default value is None. + :paramtype type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -174700,50 +185556,43 @@ async def post( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = {} - - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "action": { - "completed_at": "2020-02-20 00:00:00", # Optional. A time value - given in ISO8601 combined date and time format that represents when the - action was completed. - "id": 0, # Optional. A unique numeric ID that can be used to - identify and reference an action. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. - "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. 
- ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - }, - "region_slug": "str", # Optional. A human-readable string that is - used as a unique identifier for each region. - "resource_id": 0, # Optional. A unique identifier for the resource - that the action is associated with. - "resource_type": "str", # Optional. The type of resource that the - action is associated with. - "started_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the action was - initiated. - "status": "in-progress", # Optional. Default value is "in-progress". - The current status of the action. This can be "in-progress", "completed", or - "errored". Known values are: "in-progress", "completed", and "errored". - "type": "str" # Optional. This is the type of action that the object - represents. For example, this could be "transfer" to represent the state of - an image transfer action. + "scan": { + "created_at": "2020-02-20 00:00:00", # Optional. When scan was + created. + "findings": [ + { + "affected_resources_count": 0, # Optional. The + number of affected resources for the finding. + "business_impact": "str", # Optional. A description + of the business impact of the finding. + "details": "str", # Optional. A description of the + risk associated with the finding. + "found_at": "2020-02-20 00:00:00", # Optional. When + the finding was discovered. + "mitigation_steps": [ + { + "description": "str", # Optional. + description. + "step": 0, # Optional. step. + "title": "str" # Optional. title. + } + ], + "name": "str", # Optional. The name of the rule that + triggered the finding. + "rule_uuid": "str", # Optional. The unique + identifier for the rule that triggered the finding. + "severity": "str", # Optional. The severity of the + finding. Known values are: "CRITICAL", "HIGH", "MEDIUM", and "LOW". + "technical_details": "str" # Optional. 
A description + of the technical details related to the finding. + } + ], + "id": "str", # Optional. The unique identifier for the scan. + "status": "str" # Optional. The status of the scan. Known values + are: "IN_PROGRESS", "COMPLETED", "FAILED", "CSPM_NOT_ENABLED", and + "SCAN_NOT_RUN". } } # response body for status code(s): 404 @@ -174771,30 +185620,16 @@ async def post( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[JSON] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None - - _request = build_reserved_ipv6_actions_post_request( - reserved_ipv6=reserved_ipv6, - content_type=content_type, - json=_json, - content=_content, + _request = build_security_get_latest_scan_request( + per_page=per_page, + page=page, + severity=severity, + type=type, headers=_headers, params=_params, ) @@ -174809,14 +185644,14 @@ async def post( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 201: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -174853,45 +185688,23 @@ async def post( return cast(JSON, deserialized) # type: ignore - -class ByoipPrefixesOperations: - """ - .. 
warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~pydo.aio.GeneratedClient`'s - :attr:`byoip_prefixes` attribute. - """ - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = ( - input_args.pop(0) if input_args else kwargs.pop("deserializer") - ) - @overload - async def create( + async def create_scan_rule( self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Create a BYOIP Prefix. - - To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``. + """Create Scan Rule. - A successful request will initiate the process of bringing your BYOIP Prefix into your account. - The response will include the details of the created prefix, including its UUID and status. + To mark a scan finding as a false positive, send a POST request to + ``/v2/security/scans/rules`` to create a new scan rule. :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -174899,19 +185712,11 @@ async def create( # JSON input template you can fill out and use as your body input. body = { - "prefix": "str", # The IP prefix in CIDR notation to bring. Required. - "region": "str", # The region where the prefix will be created. Required. - "signature": "str" # The signature hash for the prefix creation request. - Required. + "resource": "str" # Optional. 
The URN of a resource to exclude from future + scans. } - # response body for status code(s): 202 - response == { - "region": "str", # Optional. The region where the prefix is created. - "status": "str", # Optional. The status of the BYOIP prefix. - "uuid": "str" # Optional. The unique identifier for the BYOIP prefix. - } - # response body for status code(s): 422 + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -174925,36 +185730,28 @@ async def create( """ @overload - async def create( + async def create_scan_rule( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Create a BYOIP Prefix. - - To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``. + """Create Scan Rule. - A successful request will initiate the process of bringing your BYOIP Prefix into your account. - The response will include the details of the created prefix, including its UUID and status. + To mark a scan finding as a false positive, send a POST request to + ``/v2/security/scans/rules`` to create a new scan rule. :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 202 - response == { - "region": "str", # Optional. The region where the prefix is created. - "status": "str", # Optional. The status of the BYOIP prefix. - "uuid": "str" # Optional. The unique identifier for the BYOIP prefix. 
- } - # response body for status code(s): 422 + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -174968,19 +185765,19 @@ async def create( """ @distributed_trace_async - async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: + async def create_scan_rule( + self, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Create a BYOIP Prefix. - - To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``. + """Create Scan Rule. - A successful request will initiate the process of bringing your BYOIP Prefix into your account. - The response will include the details of the created prefix, including its UUID and status. + To mark a scan finding as a false positive, send a POST request to + ``/v2/security/scans/rules`` to create a new scan rule. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -174988,19 +185785,11 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # JSON input template you can fill out and use as your body input. body = { - "prefix": "str", # The IP prefix in CIDR notation to bring. Required. - "region": "str", # The region where the prefix will be created. Required. - "signature": "str" # The signature hash for the prefix creation request. - Required. + "resource": "str" # Optional. The URN of a resource to exclude from future + scans. } - # response body for status code(s): 202 - response == { - "region": "str", # Optional. The region where the prefix is created. - "status": "str", # Optional. The status of the BYOIP prefix. - "uuid": "str" # Optional. 
The unique identifier for the BYOIP prefix. - } - # response body for status code(s): 422 + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -175031,7 +185820,7 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -175041,7 +185830,7 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: else: _json = body - _request = build_byoip_prefixes_create_request( + _request = build_security_create_scan_rule_request( content_type=content_type, json=_json, content=_content, @@ -175059,14 +185848,26 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [202, 422]: + if response.status_code not in [201, 400, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 202: + if response.status_code == 201: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 400: response_headers["ratelimit-limit"] = self._deserialize( "int", 
response.headers.get("ratelimit-limit") ) @@ -175082,7 +185883,7 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: else: deserialized = None - if response.status_code == 422: + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -175099,17 +185900,30 @@ async def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @distributed_trace_async - async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: - """List BYOIP Prefixes. + async def list_scan_finding_affected_resources( + self, + scan_id: str, + finding_uuid: str, + *, + per_page: int = 20, + page: int = 1, + **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """List Finding Affected Resources. - To list all BYOIP prefixes, send a GET request to ``/v2/byoip_prefixes``. - A successful response will return a list of all BYOIP prefixes associated with the account. + To get affected resources for a scan finding, send a GET request to + ``/v2/security/scans/{scan_id}/findings/{finding_uuid}/affected_resources``. + :param scan_id: The scan UUID. Required. + :type scan_id: str + :param finding_uuid: The finding UUID. Required. + :type finding_uuid: str :keyword per_page: Number of items returned per page. Default value is 20. :paramtype per_page: int :keyword page: Which 'page' of paginated results to return. Default value is 1. @@ -175123,41 +185937,26 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO # response body for status code(s): 200 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. 
- }, - "byoip_prefixes": [ + "affected_resources": [ { - "advertised": bool, # Optional. Whether the BYOIP prefix is - being advertised. - "failure_reason": "str", # Optional. Reason for failure, if - applicable. - "locked": bool, # Optional. Whether the BYOIP prefix is - locked. - "name": "str", # Optional. Name of the BYOIP prefix. - "prefix": "str", # Optional. The IP prefix in CIDR notation. - "project_id": "str", # Optional. The ID of the project - associated with the BYOIP prefix. - "region": "str", # Optional. Region where the BYOIP prefix - is located. - "status": "str", # Optional. Status of the BYOIP prefix. - "uuid": "str", # Optional. Unique identifier for the BYOIP - prefix. - "validations": [ - { - "name": "str", # Optional. Name of the - validation. - "note": "str", # Optional. Additional notes - or details about the validation. - "status": "str" # Optional. Status of the - validation. - } - ] + "name": "str", # Optional. The name of the affected + resource. + "type": "str", # Optional. The type of the affected + resource. + "urn": "str" # Optional. The URN for the affected resource. } - ], - "links": { - "pages": {} - } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
} """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -175178,7 +185977,9 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_byoip_prefixes_list_request( + _request = build_security_list_scan_finding_affected_resources_request( + scan_id=scan_id, + finding_uuid=finding_uuid, per_page=per_page, page=page, headers=_headers, @@ -175195,27 +185996,44 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + 
response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore @@ -175223,16 +186041,18 @@ async def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSO return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON: + async def list_settings( + self, *, per_page: int = 20, page: int = 1, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Get a BYOIP Prefix. - - To get a BYOIP prefix, send a GET request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. + """List Settings. - A successful response will return the details of the specified BYOIP prefix. + To list CSPM scan settings, send a GET request to ``/v2/security/settings``. - :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required. - :type byoip_prefix_uuid: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -175242,32 +186062,61 @@ async def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "byoip_prefix": { - "advertised": bool, # Optional. Whether the BYOIP prefix is being - advertised. - "failure_reason": "str", # Optional. Reason for failure, if - applicable. - "locked": bool, # Optional. Whether the BYOIP prefix is locked. - "name": "str", # Optional. Name of the BYOIP prefix. - "prefix": "str", # Optional. 
The IP prefix in CIDR notation. - "project_id": "str", # Optional. The ID of the project associated - with the BYOIP prefix. - "region": "str", # Optional. Region where the BYOIP prefix is - located. - "status": "str", # Optional. Status of the BYOIP prefix. - "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. - "validations": [ - { - "name": "str", # Optional. Name of the validation. - "note": "str", # Optional. Additional notes or - details about the validation. - "status": "str" # Optional. Status of the - validation. - } - ] + "plan_downgrades": { + "str": { + "effective_at": "2020-02-20 00:00:00", # Optional. When the + coverage downgrade takes effect. + "resources": [ + "str" # Optional. URNs of resources that will be + downgraded. + ] + } + }, + "settings": { + "suppressions": { + "links": { + "pages": { + "first": "str", # Optional. + "last": "str", # Optional. + "next": "str", # Optional. + "prev": "str" # Optional. + } + }, + "meta": { + "page": 0, # Optional. + "pages": 0, # Optional. + "total": 0 # Optional. + }, + "resources": [ + { + "id": "str", # Optional. Unique identifier + for the suppressed resource. + "resource_id": "str", # Optional. Unique + identifier for the resource suppressed. + "resource_type": "str", # Optional. Resource + type for the resource suppressed. + "rule_name": "str", # Optional. + Human-readable rule name for the suppressed rule. + "rule_uuid": "str" # Optional. Unique + identifier for the suppressed rule. + } + ] + } + }, + "tier_coverage": { + "str": { + "resources": [ + "str" # Optional. Dictionary of + . + ], + "tags": [ + "str" # Optional. Dictionary of + . + ] + } } } - # response body for status code(s): 404, 422 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -175297,8 +186146,9 @@ async def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_byoip_prefixes_get_request( - byoip_prefix_uuid=byoip_prefix_uuid, + _request = build_security_list_settings_request( + per_page=per_page, + page=page, headers=_headers, params=_params, ) @@ -175313,7 +186163,7 @@ async def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200, 404, 422]: + if response.status_code not in [200, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -175352,48 +186202,175 @@ async def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON: else: deserialized = None - if response.status_code == 422: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore return cast(JSON, deserialized) # type: ignore - @distributed_trace_async - async def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]: + @overload + async def update_settings_plan( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Delete a BYOIP Prefix. + """Update Plan. 
- To delete a BYOIP prefix and remove it from your account, send a DELETE request - to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. + To update CSPM plan coverage, send a PUT request to ``/v2/security/settings/plan``. - A successful request will receive a 202 status code with no body in response. - This indicates that the request was accepted and the prefix is being deleted. + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: - :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required. - :type byoip_prefix_uuid: str - :return: JSON object or None - :rtype: JSON or None + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "tier_coverage": { + "str": { + "resources": [ + "str" # Optional. The URNs of resources to scan for + the tier. + ], + "tags": [ + "str" # Optional. Resource tags to scan for the + tier. + ] + } + } + } + + # response body for status code(s): 200 + response == { + "tier_coverage": { + "str": { + "resources": [ + "str" # Optional. Dictionary of + . + ], + "tags": [ + "str" # Optional. Dictionary of + . + ] + } + } + } + # response body for status code(s): 400, 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @overload + async def update_settings_plan( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update Plan. + + To update CSPM plan coverage, send a PUT request to ``/v2/security/settings/plan``. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 404, 422 + # response body for status code(s): 200 + response == { + "tier_coverage": { + "str": { + "resources": [ + "str" # Optional. Dictionary of + . + ], + "tags": [ + "str" # Optional. Dictionary of + . + ] + } + } + } + # response body for status code(s): 400, 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def update_settings_plan( + self, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update Plan. + + To update CSPM plan coverage, send a PUT request to ``/v2/security/settings/plan``. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "tier_coverage": { + "str": { + "resources": [ + "str" # Optional. The URNs of resources to scan for + the tier. + ], + "tags": [ + "str" # Optional. Resource tags to scan for the + tier. + ] + } + } + } + + # response body for status code(s): 200 + response == { + "tier_coverage": { + "str": { + "resources": [ + "str" # Optional. Dictionary of + . + ], + "tags": [ + "str" # Optional. Dictionary of + . + ] + } + } + } + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -175418,13 +186395,26 @@ async def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]: } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_byoip_prefixes_delete_request( - byoip_prefix_uuid=byoip_prefix_uuid, + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_security_update_settings_plan_request( + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -175439,15 +186429,14 @@ async def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]: response = pipeline_response.http_response - if response.status_code not in [202, 404, 422]: + if response.status_code not in [200, 400, 404]: if _stream: await response.read() # Load the body in memory and close the 
socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 202: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -175458,7 +186447,12 @@ async def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]: "int", response.headers.get("ratelimit-reset") ) - if response.status_code == 404: + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 400: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -175474,7 +186468,7 @@ async def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]: else: deserialized = None - if response.status_code == 422: + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -175491,29 +186485,19 @@ async def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]: deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @overload - async def patch( - self, - byoip_prefix_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any + async def create_suppression( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update a BYOIP Prefix. - - To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. + """Create Suppression. - Currently, you can update the advertisement status of the prefix. 
- The response will include the updated details of the prefix. + To suppress scan findings, send a POST request to ``/v2/security/settings/suppressions``. - :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required. - :type byoip_prefix_uuid: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -175528,37 +186512,44 @@ async def patch( # JSON input template you can fill out and use as your body input. body = { - "advertise": bool # Optional. Whether the BYOIP prefix should be advertised. + "resources": [ + "str" # Optional. The URNs of resources to suppress for the rule. + ], + "rule_uuid": "str" # Optional. The rule UUID to suppress for the listed + resources. } - # response body for status code(s): 202 + # response body for status code(s): 201 response == { - "byoip_prefix": { - "advertised": bool, # Optional. Whether the BYOIP prefix is being - advertised. - "failure_reason": "str", # Optional. Reason for failure, if - applicable. - "locked": bool, # Optional. Whether the BYOIP prefix is locked. - "name": "str", # Optional. Name of the BYOIP prefix. - "prefix": "str", # Optional. The IP prefix in CIDR notation. - "project_id": "str", # Optional. The ID of the project associated - with the BYOIP prefix. - "region": "str", # Optional. Region where the BYOIP prefix is - located. - "status": "str", # Optional. Status of the BYOIP prefix. - "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. - "validations": [ - { - "name": "str", # Optional. Name of the validation. - "note": "str", # Optional. Additional notes or - details about the validation. - "status": "str" # Optional. Status of the - validation. - } - ] - } + "links": { + "pages": { + "first": "str", # Optional. + "last": "str", # Optional. + "next": "str", # Optional. + "prev": "str" # Optional. + } + }, + "meta": { + "page": 0, # Optional. + "pages": 0, # Optional. + "total": 0 # Optional. 
+ }, + "resources": [ + { + "id": "str", # Optional. Unique identifier for the + suppressed resource. + "resource_id": "str", # Optional. Unique identifier for the + resource suppressed. + "resource_type": "str", # Optional. Resource type for the + resource suppressed. + "rule_name": "str", # Optional. Human-readable rule name for + the suppressed rule. + "rule_uuid": "str" # Optional. Unique identifier for the + suppressed rule. + } + ] } - # response body for status code(s): 404, 422 + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -175572,24 +186563,14 @@ async def patch( """ @overload - async def patch( - self, - byoip_prefix_uuid: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any + async def create_suppression( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update a BYOIP Prefix. - - To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. + """Create Suppression. - Currently, you can update the advertisement status of the prefix. - The response will include the updated details of the prefix. + To suppress scan findings, send a POST request to ``/v2/security/settings/suppressions``. - :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required. - :type byoip_prefix_uuid: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -175602,34 +186583,37 @@ async def patch( Example: .. code-block:: python - # response body for status code(s): 202 + # response body for status code(s): 201 response == { - "byoip_prefix": { - "advertised": bool, # Optional. Whether the BYOIP prefix is being - advertised. - "failure_reason": "str", # Optional. 
Reason for failure, if - applicable. - "locked": bool, # Optional. Whether the BYOIP prefix is locked. - "name": "str", # Optional. Name of the BYOIP prefix. - "prefix": "str", # Optional. The IP prefix in CIDR notation. - "project_id": "str", # Optional. The ID of the project associated - with the BYOIP prefix. - "region": "str", # Optional. Region where the BYOIP prefix is - located. - "status": "str", # Optional. Status of the BYOIP prefix. - "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. - "validations": [ - { - "name": "str", # Optional. Name of the validation. - "note": "str", # Optional. Additional notes or - details about the validation. - "status": "str" # Optional. Status of the - validation. - } - ] - } + "links": { + "pages": { + "first": "str", # Optional. + "last": "str", # Optional. + "next": "str", # Optional. + "prev": "str" # Optional. + } + }, + "meta": { + "page": 0, # Optional. + "pages": 0, # Optional. + "total": 0 # Optional. + }, + "resources": [ + { + "id": "str", # Optional. Unique identifier for the + suppressed resource. + "resource_id": "str", # Optional. Unique identifier for the + resource suppressed. + "resource_type": "str", # Optional. Resource type for the + resource suppressed. + "rule_name": "str", # Optional. Human-readable rule name for + the suppressed rule. + "rule_uuid": "str" # Optional. Unique identifier for the + suppressed rule. + } + ] } - # response body for status code(s): 404, 422 + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -175643,19 +186627,14 @@ async def patch( """ @distributed_trace_async - async def patch( - self, byoip_prefix_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + async def create_suppression( + self, body: Union[JSON, IO[bytes]], **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update a BYOIP Prefix. - - To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. + """Create Suppression. - Currently, you can update the advertisement status of the prefix. - The response will include the updated details of the prefix. + To suppress scan findings, send a POST request to ``/v2/security/settings/suppressions``. - :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required. - :type byoip_prefix_uuid: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object @@ -175667,37 +186646,44 @@ async def patch( # JSON input template you can fill out and use as your body input. body = { - "advertise": bool # Optional. Whether the BYOIP prefix should be advertised. + "resources": [ + "str" # Optional. The URNs of resources to suppress for the rule. + ], + "rule_uuid": "str" # Optional. The rule UUID to suppress for the listed + resources. } - # response body for status code(s): 202 + # response body for status code(s): 201 response == { - "byoip_prefix": { - "advertised": bool, # Optional. Whether the BYOIP prefix is being - advertised. - "failure_reason": "str", # Optional. Reason for failure, if - applicable. - "locked": bool, # Optional. Whether the BYOIP prefix is locked. - "name": "str", # Optional. Name of the BYOIP prefix. - "prefix": "str", # Optional. The IP prefix in CIDR notation. - "project_id": "str", # Optional. The ID of the project associated - with the BYOIP prefix. - "region": "str", # Optional. Region where the BYOIP prefix is - located. 
- "status": "str", # Optional. Status of the BYOIP prefix. - "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. - "validations": [ - { - "name": "str", # Optional. Name of the validation. - "note": "str", # Optional. Additional notes or - details about the validation. - "status": "str" # Optional. Status of the - validation. - } - ] - } + "links": { + "pages": { + "first": "str", # Optional. + "last": "str", # Optional. + "next": "str", # Optional. + "prev": "str" # Optional. + } + }, + "meta": { + "page": 0, # Optional. + "pages": 0, # Optional. + "total": 0 # Optional. + }, + "resources": [ + { + "id": "str", # Optional. Unique identifier for the + suppressed resource. + "resource_id": "str", # Optional. Unique identifier for the + resource suppressed. + "resource_type": "str", # Optional. Resource type for the + resource suppressed. + "rule_name": "str", # Optional. Human-readable rule name for + the suppressed rule. + "rule_uuid": "str" # Optional. Unique identifier for the + suppressed rule. + } + ] } - # response body for status code(s): 404, 422 + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -175738,8 +186724,7 @@ async def patch( else: _json = body - _request = build_byoip_prefixes_patch_request( - byoip_prefix_uuid=byoip_prefix_uuid, + _request = build_security_create_suppression_request( content_type=content_type, json=_json, content=_content, @@ -175757,14 +186742,14 @@ async def patch( response = pipeline_response.http_response - if response.status_code not in [202, 404, 422]: + if response.status_code not in [201, 400, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 202: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -175780,7 +186765,7 @@ async def patch( else: deserialized = None - if response.status_code == 404: + if response.status_code == 400: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -175796,7 +186781,7 @@ async def patch( else: deserialized = None - if response.status_code == 422: + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -175818,57 +186803,24 @@ async def patch( return cast(JSON, deserialized) # type: ignore @distributed_trace_async - async def list_resources( - self, - byoip_prefix_uuid: str, - *, - per_page: int = 20, - page: int = 1, - **kwargs: Any - ) -> JSON: + async def delete_suppression( + self, suppression_uuid: str, **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """List BYOIP Prefix Resources. + """Delete Suppression. - To list resources associated with BYOIP prefixes, send a GET request to - ``/v2/byoip_prefixes/{byoip_prefix_uuid}/ips``. 
- - A successful response will return a list of resources associated with the specified BYOIP - prefix. + To remove a suppression, send a DELETE request to + ``/v2/security/settings/suppressions/{suppression_uuid}``. - :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required. - :type byoip_prefix_uuid: str - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int - :return: JSON object - :rtype: JSON + :param suppression_uuid: The suppression UUID to remove. Required. + :type suppression_uuid: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "ips": [ - { - "assigned_at": "2020-02-20 00:00:00", # Optional. Time when - the allocation was assigned. - "byoip": "str", # Optional. The BYOIP prefix UUID. - "id": 0, # Optional. Unique identifier for the allocation. - "region": "str", # Optional. Region where the allocation is - made. - "resource": "str" # Optional. The resource associated with - the allocation. 
- } - ], - "links": { - "pages": {} - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -175897,12 +186849,10 @@ async def list_resources( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_byoip_prefixes_list_resources_request( - byoip_prefix_uuid=byoip_prefix_uuid, - per_page=per_page, - page=page, + _request = build_security_delete_suppression_request( + suppression_uuid=suppression_uuid, headers=_headers, params=_params, ) @@ -175917,14 +186867,15 @@ async def list_resources( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -175935,11 +186886,6 @@ async def list_resources( "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -175957,9 +186903,9 @@ async def list_resources( deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore class SizesOperations: diff --git 
a/src/pydo/operations/__init__.py b/src/pydo/operations/__init__.py index 4a74b7c..0c8c048 100644 --- a/src/pydo/operations/__init__.py +++ b/src/pydo/operations/__init__.py @@ -17,12 +17,14 @@ from ._operations import InvoicesOperations from ._operations import BillingInsightsOperations from ._operations import DatabasesOperations +from ._operations import DedicatedInferencesOperations from ._operations import DomainsOperations from ._operations import DropletsOperations from ._operations import DropletActionsOperations from ._operations import AutoscalepoolsOperations from ._operations import FirewallsOperations from ._operations import FunctionsOperations +from ._operations import FunctionsAccessKeyOperations from ._operations import ImagesOperations from ._operations import ImageActionsOperations from ._operations import KubernetesOperations @@ -39,6 +41,7 @@ from ._operations import ReservedIPv6Operations from ._operations import ReservedIPv6ActionsOperations from ._operations import ByoipPrefixesOperations +from ._operations import SecurityOperations from ._operations import SizesOperations from ._operations import SnapshotsOperations from ._operations import SpacesKeyOperations @@ -70,12 +73,14 @@ "InvoicesOperations", "BillingInsightsOperations", "DatabasesOperations", + "DedicatedInferencesOperations", "DomainsOperations", "DropletsOperations", "DropletActionsOperations", "AutoscalepoolsOperations", "FirewallsOperations", "FunctionsOperations", + "FunctionsAccessKeyOperations", "ImagesOperations", "ImageActionsOperations", "KubernetesOperations", @@ -92,6 +97,7 @@ "ReservedIPv6Operations", "ReservedIPv6ActionsOperations", "ByoipPrefixesOperations", + "SecurityOperations", "SizesOperations", "SnapshotsOperations", "SpacesKeyOperations", diff --git a/src/pydo/operations/_operations.py b/src/pydo/operations/_operations.py index f06ed50..4795b9e 100644 --- a/src/pydo/operations/_operations.py +++ b/src/pydo/operations/_operations.py @@ -1124,6 +1124,129 @@ def 
build_apps_get_job_invocation_logs_request( # pylint: disable=name-too-long ) +def build_apps_list_events_request( + app_id: str, + *, + page: int = 1, + per_page: int = 20, + event_types: Optional[List[str]] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/apps/{app_id}/events" + path_format_arguments = { + "app_id": _SERIALIZER.url("app_id", app_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if page is not None: + _params["page"] = _SERIALIZER.query("page", page, "int", minimum=1) + if per_page is not None: + _params["per_page"] = _SERIALIZER.query( + "per_page", per_page, "int", maximum=200, minimum=1 + ) + if event_types is not None: + _params["event_types"] = _SERIALIZER.query("event_types", event_types, "[str]") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest( + method="GET", url=_url, params=_params, headers=_headers, **kwargs + ) + + +def build_apps_get_event_request( + app_id: str, event_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/apps/{app_id}/events/{event_id}" + path_format_arguments = { + "app_id": _SERIALIZER.url("app_id", app_id, "str"), + "event_id": _SERIALIZER.url("event_id", event_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def build_apps_cancel_event_request( + app_id: str, event_id: str, **kwargs: Any +) -> HttpRequest: + _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/apps/{app_id}/events/{event_id}/cancel" + path_format_arguments = { + "app_id": _SERIALIZER.url("app_id", app_id, "str"), + "event_id": _SERIALIZER.url("event_id", event_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_apps_get_event_logs_request( + app_id: str, + event_id: str, + *, + follow: Optional[bool] = None, + type: str = "UNSPECIFIED", + pod_connection_timeout: Optional[str] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/apps/{app_id}/events/{event_id}/logs" + path_format_arguments = { + "app_id": _SERIALIZER.url("app_id", app_id, "str"), + "event_id": _SERIALIZER.url("event_id", event_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if follow is not None: + _params["follow"] = _SERIALIZER.query("follow", follow, "bool") + _params["type"] = _SERIALIZER.query("type", type, "str") + if pod_connection_timeout is not None: + _params["pod_connection_timeout"] = _SERIALIZER.query( + "pod_connection_timeout", pod_connection_timeout, "str" + ) + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest( + method="GET", url=_url, params=_params, headers=_headers, **kwargs + ) + + def build_apps_list_instance_sizes_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -3581,6 +3704,342 @@ def 
build_databases_delete_opensearch_index_request( # pylint: disable=name-too return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) +def build_dedicated_inferences_get_request( + dedicated_inference_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/dedicated-inferences/{dedicated_inference_id}" + path_format_arguments = { + "dedicated_inference_id": _SERIALIZER.url( + "dedicated_inference_id", dedicated_inference_id, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def build_dedicated_inferences_patch_request( + dedicated_inference_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/dedicated-inferences/{dedicated_inference_id}" + path_format_arguments = { + "dedicated_inference_id": _SERIALIZER.url( + "dedicated_inference_id", dedicated_inference_id, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header( + "content_type", content_type, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, headers=_headers, **kwargs) + + +def build_dedicated_inferences_delete_request( # pylint: disable=name-too-long + dedicated_inference_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = 
_headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/dedicated-inferences/{dedicated_inference_id}" + path_format_arguments = { + "dedicated_inference_id": _SERIALIZER.url( + "dedicated_inference_id", dedicated_inference_id, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) + + +def build_dedicated_inferences_list_request( + *, per_page: int = 20, page: int = 1, region: Optional[str] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/dedicated-inferences" + + # Construct parameters + if per_page is not None: + _params["per_page"] = _SERIALIZER.query( + "per_page", per_page, "int", maximum=200, minimum=1 + ) + if page is not None: + _params["page"] = _SERIALIZER.query("page", page, "int", minimum=1) + if region is not None: + _params["region"] = _SERIALIZER.query("region", region, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest( + method="GET", url=_url, params=_params, headers=_headers, **kwargs + ) + + +def build_dedicated_inferences_create_request( + **kwargs: Any, +) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/dedicated-inferences" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header( + "content_type", content_type, "str" + ) + _headers["Accept"] = 
_SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_dedicated_inferences_list_accelerators_request( # pylint: disable=name-too-long + dedicated_inference_id: str, + *, + per_page: int = 20, + page: int = 1, + slug: Optional[str] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/dedicated-inferences/{dedicated_inference_id}/accelerators" + path_format_arguments = { + "dedicated_inference_id": _SERIALIZER.url( + "dedicated_inference_id", dedicated_inference_id, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if per_page is not None: + _params["per_page"] = _SERIALIZER.query( + "per_page", per_page, "int", maximum=200, minimum=1 + ) + if page is not None: + _params["page"] = _SERIALIZER.query("page", page, "int", minimum=1) + if slug is not None: + _params["slug"] = _SERIALIZER.query("slug", slug, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest( + method="GET", url=_url, params=_params, headers=_headers, **kwargs + ) + + +def build_dedicated_inferences_get_accelerator_request( # pylint: disable=name-too-long + dedicated_inference_id: str, accelerator_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/dedicated-inferences/{dedicated_inference_id}/accelerators/{accelerator_id}" + path_format_arguments = { + "dedicated_inference_id": _SERIALIZER.url( + "dedicated_inference_id", dedicated_inference_id, "str" + ), + "accelerator_id": _SERIALIZER.url("accelerator_id", accelerator_id, "str"), + } + 
+ _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def build_dedicated_inferences_get_ca_request( # pylint: disable=name-too-long + dedicated_inference_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/dedicated-inferences/{dedicated_inference_id}/ca" + path_format_arguments = { + "dedicated_inference_id": _SERIALIZER.url( + "dedicated_inference_id", dedicated_inference_id, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def build_dedicated_inferences_list_tokens_request( # pylint: disable=name-too-long + dedicated_inference_id: str, *, per_page: int = 20, page: int = 1, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/dedicated-inferences/{dedicated_inference_id}/tokens" + path_format_arguments = { + "dedicated_inference_id": _SERIALIZER.url( + "dedicated_inference_id", dedicated_inference_id, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if per_page is not None: + _params["per_page"] = _SERIALIZER.query( + "per_page", per_page, "int", maximum=200, minimum=1 + ) + if page is not None: + _params["page"] = _SERIALIZER.query("page", page, "int", minimum=1) + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return 
HttpRequest( + method="GET", url=_url, params=_params, headers=_headers, **kwargs + ) + + +def build_dedicated_inferences_create_tokens_request( # pylint: disable=name-too-long + dedicated_inference_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/dedicated-inferences/{dedicated_inference_id}/tokens" + path_format_arguments = { + "dedicated_inference_id": _SERIALIZER.url( + "dedicated_inference_id", dedicated_inference_id, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header( + "content_type", content_type, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_dedicated_inferences_delete_tokens_request( # pylint: disable=name-too-long + dedicated_inference_id: str, token_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/dedicated-inferences/{dedicated_inference_id}/tokens/{token_id}" + path_format_arguments = { + "dedicated_inference_id": _SERIALIZER.url( + "dedicated_inference_id", dedicated_inference_id, "str" + ), + "token_id": _SERIALIZER.url("token_id", token_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) + + +def build_dedicated_inferences_list_sizes_request( + **kwargs: Any, +) -> HttpRequest: # pylint: 
disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/dedicated-inferences/sizes" + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def build_dedicated_inferences_get_gpu_model_config_request( # pylint: disable=name-too-long + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/dedicated-inferences/gpu-model-config" + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + def build_domains_list_request( *, per_page: int = 20, page: int = 1, **kwargs: Any ) -> HttpRequest: @@ -5108,6 +5567,106 @@ def build_functions_delete_trigger_request( return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) +def build_functions_access_key_list_request( + namespace_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/functions/namespaces/{namespace_id}/keys" + path_format_arguments = { + "namespace_id": _SERIALIZER.url("namespace_id", namespace_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def build_functions_access_key_create_request( # pylint: disable=name-too-long + namespace_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = 
kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/functions/namespaces/{namespace_id}/keys" + path_format_arguments = { + "namespace_id": _SERIALIZER.url("namespace_id", namespace_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header( + "content_type", content_type, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_functions_access_key_update_request( # pylint: disable=name-too-long + namespace_id: str, key_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/functions/namespaces/{namespace_id}/keys/{key_id}" + path_format_arguments = { + "namespace_id": _SERIALIZER.url("namespace_id", namespace_id, "str"), + "key_id": _SERIALIZER.url("key_id", key_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header( + "content_type", content_type, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, headers=_headers, **kwargs) + + +def build_functions_access_key_delete_request( # pylint: disable=name-too-long + namespace_id: str, key_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/functions/namespaces/{namespace_id}/keys/{key_id}" + 
path_format_arguments = { + "namespace_id": _SERIALIZER.url("namespace_id", namespace_id, "str"), + "key_id": _SERIALIZER.url("key_id", key_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) + + def build_images_list_request( *, type: Optional[str] = None, @@ -10078,6 +10637,275 @@ def build_byoip_prefixes_list_resources_request( # pylint: disable=name-too-lon ) +def build_security_list_scans_request( + *, per_page: int = 20, page: int = 1, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/security/scans" + + # Construct parameters + if per_page is not None: + _params["per_page"] = _SERIALIZER.query( + "per_page", per_page, "int", maximum=200, minimum=1 + ) + if page is not None: + _params["page"] = _SERIALIZER.query("page", page, "int", minimum=1) + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest( + method="GET", url=_url, params=_params, headers=_headers, **kwargs + ) + + +def build_security_create_scan_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/security/scans" + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_security_get_scan_request( + scan_id: str, + *, + severity: Optional[str] = None, + per_page: int = 20, + page: int = 1, + type: Optional[str] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/security/scans/{scan_id}" + path_format_arguments = { + "scan_id": _SERIALIZER.url("scan_id", scan_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if severity is not None: + _params["severity"] = _SERIALIZER.query("severity", severity, "str") + if per_page is not None: + _params["per_page"] = _SERIALIZER.query( + "per_page", per_page, "int", maximum=200, minimum=1 + ) + if page is not None: + _params["page"] = _SERIALIZER.query("page", page, "int", minimum=1) + if type is not None: + _params["type"] = _SERIALIZER.query("type", type, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest( + method="GET", url=_url, params=_params, headers=_headers, **kwargs + ) + + +def build_security_get_latest_scan_request( + *, + per_page: int = 20, + page: int = 1, + severity: Optional[str] = None, + type: Optional[str] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/security/scans/latest" + + # Construct parameters + if per_page is not None: + _params["per_page"] = _SERIALIZER.query( + "per_page", per_page, "int", maximum=200, minimum=1 + ) + if page is not None: + _params["page"] = _SERIALIZER.query("page", page, "int", minimum=1) + if severity is not None: + _params["severity"] = _SERIALIZER.query("severity", severity, "str") + if type is not None: + _params["type"] = _SERIALIZER.query("type", type, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest( + 
method="GET", url=_url, params=_params, headers=_headers, **kwargs + ) + + +def build_security_create_scan_rule_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/security/scans/rules" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header( + "content_type", content_type, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_security_list_scan_finding_affected_resources_request( # pylint: disable=name-too-long + scan_id: str, finding_uuid: str, *, per_page: int = 20, page: int = 1, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/security/scans/{scan_id}/findings/{finding_uuid}/affected_resources" + path_format_arguments = { + "scan_id": _SERIALIZER.url("scan_id", scan_id, "str"), + "finding_uuid": _SERIALIZER.url("finding_uuid", finding_uuid, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if per_page is not None: + _params["per_page"] = _SERIALIZER.query( + "per_page", per_page, "int", maximum=200, minimum=1 + ) + if page is not None: + _params["page"] = _SERIALIZER.query("page", page, "int", minimum=1) + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest( + method="GET", url=_url, params=_params, headers=_headers, **kwargs + ) + + +def build_security_list_settings_request( + *, per_page: int = 20, page: int = 1, **kwargs: Any +) -> 
HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/security/settings" + + # Construct parameters + if per_page is not None: + _params["per_page"] = _SERIALIZER.query( + "per_page", per_page, "int", maximum=200, minimum=1 + ) + if page is not None: + _params["page"] = _SERIALIZER.query("page", page, "int", minimum=1) + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest( + method="GET", url=_url, params=_params, headers=_headers, **kwargs + ) + + +def build_security_update_settings_plan_request( + **kwargs: Any, +) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/security/settings/plan" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header( + "content_type", content_type, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, headers=_headers, **kwargs) + + +def build_security_create_suppression_request( + **kwargs: Any, +) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/security/settings/suppressions" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header( + "content_type", content_type, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", 
accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_security_delete_suppression_request( # pylint: disable=name-too-long + suppression_uuid: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/security/settings/suppressions/{suppression_uuid}" + path_format_arguments = { + "suppression_uuid": _SERIALIZER.url( + "suppression_uuid", suppression_uuid, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) + + def build_sizes_list_request( *, per_page: int = 20, page: int = 1, **kwargs: Any ) -> HttpRequest: @@ -88909,8 +89737,10 @@ def get_logs_active_deployment( * BUILD: Build-time logs * DEPLOY: Deploy-time logs * RUN: Live run-time logs - * RUN_RESTARTED: Logs of crashed/restarted instances during runtime. Known values are: - "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED". + * RUN_RESTARTED: Logs of crashed/restarted instances during runtime + * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are: + "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value + is "UNSPECIFIED". :paramtype type: str :keyword pod_connection_timeout: An optional time duration to wait if the underlying component instance is not immediately available. Default: ``3m``. Default value is None. @@ -98753,8 +99583,10 @@ def get_logs( * BUILD: Build-time logs * DEPLOY: Deploy-time logs * RUN: Live run-time logs - * RUN_RESTARTED: Logs of crashed/restarted instances during runtime. Known values are: - "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED". 
+ * RUN_RESTARTED: Logs of crashed/restarted instances during runtime + * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are: + "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value + is "UNSPECIFIED". :paramtype type: str :keyword pod_connection_timeout: An optional time duration to wait if the underlying component instance is not immediately available. Default: ``3m``. Default value is None. @@ -98901,8 +99733,10 @@ def get_logs_aggregate( * BUILD: Build-time logs * DEPLOY: Deploy-time logs * RUN: Live run-time logs - * RUN_RESTARTED: Logs of crashed/restarted instances during runtime. Known values are: - "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED". + * RUN_RESTARTED: Logs of crashed/restarted instances during runtime + * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are: + "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value + is "UNSPECIFIED". :paramtype type: str :keyword pod_connection_timeout: An optional time duration to wait if the underlying component instance is not immediately available. Default: ``3m``. Default value is None. @@ -99179,8 +100013,10 @@ def get_logs_active_deployment_aggregate( * BUILD: Build-time logs * DEPLOY: Deploy-time logs * RUN: Live run-time logs - * RUN_RESTARTED: Logs of crashed/restarted instances during runtime. Known values are: - "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", and "RUN_RESTARTED". Default value is "UNSPECIFIED". + * RUN_RESTARTED: Logs of crashed/restarted instances during runtime + * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are: + "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value + is "UNSPECIFIED". 
:paramtype type: str :keyword pod_connection_timeout: An optional time duration to wait if the underlying component instance is not immediately available. Default: ``3m``. Default value is None. @@ -99940,260 +100776,28 @@ def get_job_invocation_logs( return cast(JSON, deserialized) # type: ignore @distributed_trace - def list_instance_sizes(self, **kwargs: Any) -> JSON: - # pylint: disable=line-too-long - """List Instance Sizes. - - List all instance sizes for ``service``\\ , ``worker``\\ , and ``job`` components. - - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "discount_percent": 0.0, # Optional. - "instance_sizes": [ - { - "bandwidth_allowance_gib": "str", # Optional. The bandwidth - allowance in GiB for the instance size. - "cpu_type": "UNSPECIFIED", # Optional. Default value is - "UNSPECIFIED". * SHARED: Shared vCPU cores * DEDICATED: Dedicated vCPU - cores. Known values are: "UNSPECIFIED", "SHARED", and "DEDICATED". - "cpus": "str", # Optional. The number of allotted vCPU - cores. - "deprecation_intent": bool, # Optional. Indicates if the - instance size is intended for deprecation. - "memory_bytes": "str", # Optional. The allotted memory in - bytes. - "name": "str", # Optional. A human-readable name of the - instance size. - "scalable": bool, # Optional. Indicates if the instance size - can enable autoscaling. - "single_instance_only": bool, # Optional. Indicates if the - instance size allows more than one instance. - "slug": "str", # Optional. The slug of the instance size. - "tier_downgrade_to": "str", # Optional. The slug of the - corresponding downgradable instance size on the lower tier. - "tier_slug": "str", # Optional. The slug of the tier to - which this instance size belongs. - "tier_upgrade_to": "str", # Optional. The slug of the - corresponding upgradable instance size on the higher tier. 
- "usd_per_month": "str", # Optional. The cost of this - instance size in USD per month. - "usd_per_second": "str" # Optional. The cost of this - instance size in USD per second. - } - ] - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_apps_list_instance_sizes_request( - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @distributed_trace - def 
get_instance_size(self, slug: str, **kwargs: Any) -> JSON: + def list_events( + self, + app_id: str, + *, + page: int = 1, + per_page: int = 20, + event_types: Optional[List[str]] = None, + **kwargs: Any, + ) -> JSON: # pylint: disable=line-too-long - """Retrieve an Instance Size. - - Retrieve information about a specific instance size for ``service``\\ , ``worker``\\ , and - ``job`` components. - - :param slug: The slug of the instance size. Required. - :type slug: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "instance_size": { - "bandwidth_allowance_gib": "str", # Optional. The bandwidth - allowance in GiB for the instance size. - "cpu_type": "UNSPECIFIED", # Optional. Default value is - "UNSPECIFIED". * SHARED: Shared vCPU cores * DEDICATED: Dedicated vCPU - cores. Known values are: "UNSPECIFIED", "SHARED", and "DEDICATED". - "cpus": "str", # Optional. The number of allotted vCPU cores. - "deprecation_intent": bool, # Optional. Indicates if the instance - size is intended for deprecation. - "memory_bytes": "str", # Optional. The allotted memory in bytes. - "name": "str", # Optional. A human-readable name of the instance - size. - "scalable": bool, # Optional. Indicates if the instance size can - enable autoscaling. - "single_instance_only": bool, # Optional. Indicates if the instance - size allows more than one instance. - "slug": "str", # Optional. The slug of the instance size. - "tier_downgrade_to": "str", # Optional. The slug of the - corresponding downgradable instance size on the lower tier. - "tier_slug": "str", # Optional. The slug of the tier to which this - instance size belongs. - "tier_upgrade_to": "str", # Optional. The slug of the corresponding - upgradable instance size on the higher tier. - "usd_per_month": "str", # Optional. The cost of this instance size - in USD per month. 
- "usd_per_second": "str" # Optional. The cost of this instance size - in USD per second. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_apps_get_instance_size_request( - slug=slug, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", 
response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @distributed_trace - def list_regions(self, **kwargs: Any) -> JSON: - """List App Regions. + """List App Events. - List all regions supported by App Platform. + List all events for an app, including deployments and autoscaling events. + :param app_id: The app ID. Required. + :type app_id: str + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword event_types: Filter events by event type. Default value is None. + :paramtype event_types: list[str] :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -100203,2699 +100807,3450 @@ def list_regions(self, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "regions": [ + "events": [ { - "continent": "str", # Optional. The continent that this - region is in. - "data_centers": [ - "str" # Optional. 
Data centers that are in this - region. - ], - "default": bool, # Optional. Whether or not the region is - presented as the default. - "disabled": bool, # Optional. Whether or not the region is - open for new apps. - "flag": "str", # Optional. The flag of this region. - "label": "str", # Optional. A human-readable name of the - region. - "reason": "str", # Optional. Reason that this region is not - available. - "slug": "str" # Optional. The slug form of the region name. - } - ] - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_apps_list_regions_request( - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", 
response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @overload - def validate_app_spec( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Propose an App Spec. - - To propose and validate a spec for a new or existing app, send a POST request to the - ``/v2/apps/propose`` endpoint. The request returns some information about the proposed app, - including app cost and upgrade cost. If an existing app ID is specified, the app spec is - treated as a proposed update to the existing app. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "spec": { - "name": "str", # The name of the app. Must be unique across all apps - in the same account. Required. - "databases": [ - { - "name": "str", # The database's name. The name must - be unique across all components within the same app and cannot use - capital letters. Required. - "cluster_name": "str", # Optional. The name of the - underlying DigitalOcean DBaaS cluster. This is required for - production databases. For dev databases, if cluster_name is not set, - a new cluster will be provisioned. - "db_name": "str", # Optional. The name of the MySQL - or PostgreSQL database to configure. - "db_user": "str", # Optional. The name of the MySQL - or PostgreSQL user to configure. - "engine": "UNSET", # Optional. Default value is - "UNSET". 
* MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * MONGODB: - MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey. - Known values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", - "KAFKA", "OPENSEARCH", and "VALKEY". - "production": bool, # Optional. Whether this is a - production or dev database. - "version": "str" # Optional. The version of the - database engine. - } - ], - "disable_edge_cache": False, # Optional. Default value is False. .. - role:: raw-html-m2r(raw) :format: html If set to ``true``"" , the app - will **not** be cached at the edge (CDN). Enable this option if you want to - manage CDN configuration yourself"u2014whether by using an external CDN - provider or by handling static content and caching within your app. This - setting is also recommended for apps that require real-time data or serve - dynamic content, such as those using Server-Sent Events (SSE) over GET, or - hosting an MCP (Model Context Protocol) Server that utilizes SSE."" - :raw-html-m2r:`
` **Note:** This feature is not available for static site - components."" :raw-html-m2r:`
` For more information, see `Disable CDN - Cache - `_. - "disable_email_obfuscation": False, # Optional. Default value is - False. If set to ``true``"" , email addresses in the app will not be - obfuscated. This is useful for apps that require email addresses to be - visible (in the HTML markup). - "domains": [ - { - "domain": "str", # The hostname for the domain. - Required. - "minimum_tls_version": "str", # Optional. The - minimum version of TLS a client application can use to access - resources for the domain. Must be one of the following values - wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values are: - "1.2" and "1.3". - "type": "UNSPECIFIED", # Optional. Default value is - "UNSPECIFIED". * DEFAULT: The default ``.ondigitalocean.app`` domain - assigned to this app * PRIMARY: The primary domain for this app that - is displayed as the default in the control panel, used in bindable - environment variables, and any other places that reference an app's - live URL. Only one domain may be set as primary. * ALIAS: A - non-primary domain. Known values are: "UNSPECIFIED", "DEFAULT", - "PRIMARY", and "ALIAS". - "wildcard": bool, # Optional. Indicates whether the - domain includes all sub-domains, in addition to the given domain. - "zone": "str" # Optional. Optional. If the domain - uses DigitalOcean DNS and you would like App Platform to - automatically manage it for you, set this to the name of the domain - on your account. For example, If the domain you are adding is - ``app.domain.com``"" , the zone could be ``domain.com``. - } - ], - "egress": { - "type": "AUTOASSIGN" # Optional. Default value is - "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and - "DEDICATED_IP". - }, - "enhanced_threat_control_enabled": False, # Optional. Default value - is False. If set to ``true``"" , suspicious requests will go through - additional security checks to help mitigate layer 7 DDoS attacks. - "functions": [ - { - "name": "str", # The name. 
Must be unique across all - components within the same app. Required. - "alerts": [ - { - "disabled": bool, # Optional. Is the - alert disabled?. - "operator": "UNSPECIFIED_OPERATOR", - # Optional. Default value is "UNSPECIFIED_OPERATOR". Known - values are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and - "LESS_THAN". - "rule": "UNSPECIFIED_RULE", # - Optional. Default value is "UNSPECIFIED_RULE". Known values - are: "UNSPECIFIED_RULE", "CPU_UTILIZATION", - "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED", - "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", - "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", - "FUNCTIONS_ACTIVATION_COUNT", - "FUNCTIONS_AVERAGE_DURATION_MS", - "FUNCTIONS_ERROR_RATE_PER_MINUTE", - "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", - and "FUNCTIONS_GB_RATE_PER_SECOND". - "value": 0.0, # Optional. Threshold - value for alert. - "window": "UNSPECIFIED_WINDOW" # - Optional. Default value is "UNSPECIFIED_WINDOW". Known values - are: "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", - "THIRTY_MINUTES", and "ONE_HOUR". - } - ], - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "cors": { - "allow_credentials": bool, # Optional. - Whether browsers should expose the response to the client-side - JavaScript code when the request"u2019s credentials mode is - include. This configures the ``Access-Control-Allow-Credentials`` - header. - "allow_headers": [ - "str" # Optional. The set of allowed - HTTP request headers. This configures the - ``Access-Control-Allow-Headers`` header. - ], - "allow_methods": [ - "str" # Optional. The set of allowed - HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. - ], - "allow_origins": [ - { - "exact": "str", # Optional. 
- Exact string match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "prefix": "str", # Optional. - Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "regex": "str" # Optional. - RE2 style regex-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. For more - information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. - } - ], - "expose_headers": [ - "str" # Optional. The set of HTTP - response headers that browsers are allowed to access. This - configures the ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. An optional - duration specifying how long browsers can cache the results of a - preflight request. This configures the ``Access-Control-Max-Age`` - header. - }, - "envs": [ - { - "key": "str", # The variable name. - Required. - "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. + "autoscaling": { + "components": { + "str": { + "from": 0, # Optional. The number of + replicas before scaling. + "to": 0, # Optional. The number of + replicas after scaling. + "triggering_metric": "str" # + Optional. The metric that triggered the scale change. Known + values are "cpu", "requests_per_second", "request_duration". 
+ For inactivity sleep, "scale_from_zero" and "scale_to_zero" + are used. } - ], - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. }, - "log_destinations": [ + "phase": "str" # Optional. The current phase of the + autoscaling event. Known values are: "UNKNOWN", "PENDING", + "IN_PROGRESS", "SUCCEEDED", "FAILED", and "CANCELED". + }, + "created_at": "2020-02-20 00:00:00", # Optional. When the + event was created. + "deployment": { + "cause": "str", # Optional. What caused this + deployment to be created. + "cloned_from": "str", # Optional. The ID of a + previous deployment that this deployment was cloned from. + "created_at": "2020-02-20 00:00:00", # Optional. The + creation time of the deployment. + "functions": [ { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # Optional. - Logtail token. - }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. 
Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": "str", # - Optional. The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. Default value is "logs". The index name to use - for the logs. If not set, the default index name is - "logs". - }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } + "name": "str", # Optional. The name + of this functions component. + "namespace": "str", # Optional. The + namespace where the functions are deployed. + "source_commit_hash": "str" # + Optional. The commit hash of the repository that was used to + build this functions component. } ], - "routes": [ + "id": "str", # Optional. The ID of the deployment. + "jobs": [ { - "path": "str", # Optional. - (Deprecated - Use Ingress Rules instead). An HTTP path - prefix. Paths must start with / and must be unique across all - components within an app. - "preserve_path_prefix": bool # - Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded to - the component. For example, a component with ``path=/api`` - will have requests to ``/api/list`` trimmed to ``/list``. If - this value is ``true``"" , the path will remain - ``/api/list``. + "name": "str", # Optional. The name + of this job. + "source_commit_hash": "str" # + Optional. The commit hash of the repository that was used to + build this job. } ], - "source_dir": "str" # Optional. An optional path to - the working directory to use for the build. 
For Dockerfile builds, - this will be used as the build context. Must be relative to the root - of the repo. - } - ], - "ingress": { - "rules": [ - { - "component": { - "name": "str", # The name of the - component to route to. Required. - "preserve_path_prefix": "str", # - Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded to - the component. For example, a component with ``path=/api`` - will have requests to ``/api/list`` trimmed to ``/list``. If - this value is ``true``"" , the path will remain - ``/api/list``. Note: this is not applicable for Functions - Components and is mutually exclusive with ``rewrite``. - "rewrite": "str" # Optional. An - optional field that will rewrite the path of the component to - be what is specified here. By default, the HTTP request path - will be trimmed from the left when forwarded to the - component. For example, a component with ``path=/api`` will - have requests to ``/api/list`` trimmed to ``/list``. If you - specified the rewrite to be ``/v1/``"" , requests to - ``/api/list`` would be rewritten to ``/v1/list``. Note: this - is mutually exclusive with ``preserve_path_prefix``. - }, - "cors": { - "allow_credentials": bool, # - Optional. Whether browsers should expose the response to the - client-side JavaScript code when the request"u2019s - credentials mode is include. This configures the - ``Access-Control-Allow-Credentials`` header. - "allow_headers": [ - "str" # Optional. The set of - allowed HTTP request headers. This configures the - ``Access-Control-Allow-Headers`` header. - ], - "allow_methods": [ - "str" # Optional. The set of - allowed HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. - ], - "allow_origins": [ - { - "exact": "str", # - Optional. Exact string match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. - "prefix": "str", # - Optional. 
Prefix-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. - "regex": "str" # - Optional. RE2 style regex-based match. Only 1 of - ``exact``"" , ``prefix``"" , or ``regex`` must be - set. For more information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. - } - ], - "expose_headers": [ - "str" # Optional. The set of - HTTP response headers that browsers are allowed to - access. This configures the - ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. An - optional duration specifying how long browsers can cache the - results of a preflight request. This configures the - ``Access-Control-Max-Age`` header. - }, - "match": { - "authority": { - "exact": "str" # Required. - }, - "path": { - "prefix": "str" # - Prefix-based match. For example, ``/api`` will match - ``/api``"" , ``/api/``"" , and any nested paths such as - ``/api/v1/endpoint``. Required. + "phase": "UNKNOWN", # Optional. Default value is + "UNKNOWN". Known values are: "UNKNOWN", "PENDING_BUILD", "BUILDING", + "PENDING_DEPLOY", "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and + "CANCELED". + "phase_last_updated_at": "2020-02-20 00:00:00", # + Optional. When the deployment phase was last updated. + "progress": { + "error_steps": 0, # Optional. Number of + unsuccessful steps. + "pending_steps": 0, # Optional. Number of + pending steps. + "running_steps": 0, # Optional. Number of + currently running steps. + "steps": [ + { + "component_name": "str", # + Optional. The component name that this step is associated + with. + "ended_at": "2020-02-20 + 00:00:00", # Optional. The end time of this step. + "message_base": "str", # + Optional. The base of a human-readable description of the + step intended to be combined with the component name for + presentation. For example: ``message_base`` = "Building + service" ``component_name`` = "api". + "name": "str", # Optional. + The name of this step. + "reason": { + "code": "str", # + Optional. 
The error code. + "message": "str" # + Optional. The error message. + }, + "started_at": "2020-02-20 + 00:00:00", # Optional. The start time of this step. + "status": "UNKNOWN", # + Optional. Default value is "UNKNOWN". Known values are: + "UNKNOWN", "PENDING", "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. Child + steps of this step. + ] } - }, - "redirect": { - "authority": "str", # Optional. The - authority/host to redirect to. This can be a hostname or IP - address. Note: use ``port`` to set the port. - "port": 0, # Optional. The port to - redirect to. - "redirect_code": 0, # Optional. The - redirect code to use. Defaults to ``302``. Supported values - are 300, 301, 302, 303, 304, 307, 308. - "scheme": "str", # Optional. The - scheme to redirect to. Supported values are ``http`` or - ``https``. Default: ``https``. - "uri": "str" # Optional. An optional - URI path to redirect to. Note: if this is specified the whole - URI of the original request will be overwritten to this - value, irrespective of the original request URI being - matched. - } - } - ] - }, - "jobs": [ - { - "autoscaling": { - "max_instance_count": 0, # Optional. The - maximum amount of instances for this component. Must be more than - min_instance_count. - "metrics": { - "cpu": { - "percent": 80 # Optional. - Default value is 80. The average target CPU utilization - for the component. + ], + "success_steps": 0, # Optional. Number of + successful steps. + "summary_steps": [ + { + "component_name": "str", # + Optional. The component name that this step is associated + with. + "ended_at": "2020-02-20 + 00:00:00", # Optional. The end time of this step. + "message_base": "str", # + Optional. The base of a human-readable description of the + step intended to be combined with the component name for + presentation. For example: ``message_base`` = "Building + service" ``component_name`` = "api". + "name": "str", # Optional. + The name of this step. 
+ "reason": { + "code": "str", # + Optional. The error code. + "message": "str" # + Optional. The error message. + }, + "started_at": "2020-02-20 + 00:00:00", # Optional. The start time of this step. + "status": "UNKNOWN", # + Optional. Default value is "UNKNOWN". Known values are: + "UNKNOWN", "PENDING", "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. Child + steps of this step. + ] } - }, - "min_instance_count": 0 # Optional. The - minimum amount of instances for this component. Must be less than - max_instance_count. - }, - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. + ], + "total_steps": 0 # Optional. Total number of + steps. }, - "build_command": "str", # Optional. An optional - build command to run while building this component from source. - "dockerfile_path": "str", # Optional. The path to - the Dockerfile relative to the root of the repo. If set, it will be - used to build this component. Otherwise, App Platform will attempt to - build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full list, - please refer to `the product documentation - `_. - "envs": [ + "services": [ { - "key": "str", # The variable name. - Required. - "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. 
Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. + "name": "str", # Optional. The name + of this service. + "source_commit_hash": "str" # + Optional. The commit hash of the repository that was used to + build this service. } ], - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "image": { - "deploy_on_push": { - "enabled": bool # Optional. Whether - to automatically deploy new images. Can only be used for - images hosted in DOCR and can only be used with an image tag, - not a specific digest. - }, - "digest": "str", # Optional. The image - digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The registry - name. Must be left empty for the ``DOCR`` registry type. - "registry_credentials": "str", # Optional. - The credentials to be able to pull the image. The value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. * "$username:$access_token" for - registries of type ``DOCKER_HUB``. * "$username:$access_token" - for registries of type ``GHCR``. 
- "registry_type": "str", # Optional. * - DOCKER_HUB: The DockerHub container registry type. * DOCR: The - DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", "DOCR", - and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default value is - "latest". The repository tag. Defaults to ``latest`` if not - provided and no digest is provided. Cannot be specified if digest - is provided. - }, - "instance_count": 1, # Optional. Default value is 1. - The amount of instances that this component should be scaled to. - Default: 1. Must not be set if autoscaling is used. - "instance_size_slug": {}, - "kind": "UNSPECIFIED", # Optional. Default value is - "UNSPECIFIED". * UNSPECIFIED: Default job type, will auto-complete to - POST_DEPLOY kind. * PRE_DEPLOY: Indicates a job that runs before an - app deployment. * POST_DEPLOY: Indicates a job that runs after an app - deployment. * FAILED_DEPLOY: Indicates a job that runs after a - component fails to deploy. Known values are: "UNSPECIFIED", - "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY". - "log_destinations": [ - { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # Optional. - Logtail token. - }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, + "spec": { + "name": "str", # The name of the app. Must + be unique across all apps in the same account. Required. + "databases": [ + { + "name": "str", # The + database's name. 
The name must be unique across all + components within the same app and cannot use capital + letters. Required. "cluster_name": "str", # - Optional. The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. Default value is "logs". The index name to use - for the logs. If not set, the default index name is - "logs". - }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. + Optional. The name of the underlying DigitalOcean DBaaS + cluster. This is required for production databases. For + dev databases, if cluster_name is not set, a new cluster + will be provisioned. + "db_name": "str", # + Optional. The name of the MySQL or PostgreSQL database to + configure. + "db_user": "str", # + Optional. The name of the MySQL or PostgreSQL user to + configure. + "engine": "UNSET", # + Optional. Default value is "UNSET". * MYSQL: MySQL * PG: + PostgreSQL * REDIS: Caching * MONGODB: MongoDB * KAFKA: + Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey. Known + values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", + "KAFKA", "OPENSEARCH", and "VALKEY". + "production": bool, # + Optional. Whether this is a production or dev database. + "version": "str" # Optional. + The version of the database engine. } - } - ], - "name": "str", # Optional. The name. Must be unique - across all components within the same app. - "run_command": "str", # Optional. An optional run - command to override the component's default. - "source_dir": "str", # Optional. An optional path to - the working directory to use for the build. For Dockerfile builds, - this will be used as the build context. Must be relative to the root - of the repo. 
- "termination": { - "grace_period_seconds": 0 # Optional. The - number of seconds to wait between sending a TERM signal to a - container and issuing a KILL which causes immediate shutdown. - (Default 120). - } - } - ], - "maintenance": { - "archive": bool, # Optional. Indicates whether the app - should be archived. Setting this to true implies that enabled is set to - true. - "enabled": bool, # Optional. Indicates whether maintenance - mode should be enabled for the app. - "offline_page_url": "str" # Optional. A custom offline page - to display when maintenance mode is enabled or the app is archived. - }, - "region": "str", # Optional. The slug form of the geographical - origin of the app. Default: ``nearest available``. Known values are: "atl", - "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", "sgp", and "syd". - "services": [ - { - "autoscaling": { - "max_instance_count": 0, # Optional. The - maximum amount of instances for this component. Must be more than - min_instance_count. - "metrics": { - "cpu": { - "percent": 80 # Optional. - Default value is 80. The average target CPU utilization - for the component. - } - }, - "min_instance_count": 0 # Optional. The - minimum amount of instances for this component. Must be less than - max_instance_count. - }, - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "build_command": "str", # Optional. An optional - build command to run while building this component from source. - "cors": { - "allow_credentials": bool, # Optional. - Whether browsers should expose the response to the client-side - JavaScript code when the request"u2019s credentials mode is - include. This configures the ``Access-Control-Allow-Credentials`` - header. - "allow_headers": [ - "str" # Optional. 
The set of allowed - HTTP request headers. This configures the - ``Access-Control-Allow-Headers`` header. - ], - "allow_methods": [ - "str" # Optional. The set of allowed - HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. ], - "allow_origins": [ + "disable_edge_cache": False, # Optional. + Default value is False. .. role:: raw-html-m2r(raw) :format: + html If set to ``true``"" , the app will **not** be cached at + the edge (CDN). Enable this option if you want to manage CDN + configuration yourself"u2014whether by using an external CDN + provider or by handling static content and caching within your + app. This setting is also recommended for apps that require + real-time data or serve dynamic content, such as those using + Server-Sent Events (SSE) over GET, or hosting an MCP (Model + Context Protocol) Server that utilizes SSE."" + :raw-html-m2r:`
` **Note:** This feature is not available for + static site components."" :raw-html-m2r:`
` For more + information, see `Disable CDN Cache + `_. + "disable_email_obfuscation": False, # + Optional. Default value is False. If set to ``true``"" , email + addresses in the app will not be obfuscated. This is useful for + apps that require email addresses to be visible (in the HTML + markup). + "domains": [ { - "exact": "str", # Optional. - Exact string match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "prefix": "str", # Optional. - Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "regex": "str" # Optional. - RE2 style regex-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. For more - information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. + "domain": "str", # The + hostname for the domain. Required. + "minimum_tls_version": "str", + # Optional. The minimum version of TLS a client + application can use to access resources for the domain. + Must be one of the following values wrapped within + quotations: ``"1.2"`` or ``"1.3"``. Known values are: + "1.2" and "1.3". + "type": "UNSPECIFIED", # + Optional. Default value is "UNSPECIFIED". * DEFAULT: The + default ``.ondigitalocean.app`` domain assigned to this + app * PRIMARY: The primary domain for this app that is + displayed as the default in the control panel, used in + bindable environment variables, and any other places that + reference an app's live URL. Only one domain may be set + as primary. * ALIAS: A non-primary domain. Known values + are: "UNSPECIFIED", "DEFAULT", "PRIMARY", and "ALIAS". + "wildcard": bool, # + Optional. Indicates whether the domain includes all + sub-domains, in addition to the given domain. + "zone": "str" # Optional. + Optional. If the domain uses DigitalOcean DNS and you + would like App Platform to automatically manage it for + you, set this to the name of the domain on your account. 
+ For example, If the domain you are adding is + ``app.domain.com``"" , the zone could be ``domain.com``. } ], - "expose_headers": [ - "str" # Optional. The set of HTTP - response headers that browsers are allowed to access. This - configures the ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. An optional - duration specifying how long browsers can cache the results of a - preflight request. This configures the ``Access-Control-Max-Age`` - header. - }, - "dockerfile_path": "str", # Optional. The path to - the Dockerfile relative to the root of the repo. If set, it will be - used to build this component. Otherwise, App Platform will attempt to - build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full list, - please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable name. - Required. - "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. - } - ], - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. 
Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "health_check": { - "failure_threshold": 0, # Optional. The - number of failed health checks before considered unhealthy. - "http_path": "str", # Optional. The route - path used for the HTTP health check ping. If not set, the HTTP - health check will be disabled and a TCP health check used - instead. - "initial_delay_seconds": 0, # Optional. The - number of seconds to wait before beginning health checks. - "period_seconds": 0, # Optional. The number - of seconds to wait between health checks. - "port": 0, # Optional. The port on which the - health check will be performed. If not set, the health check will - be performed on the component's http_port. - "success_threshold": 0, # Optional. The - number of successful health checks before considered healthy. - "timeout_seconds": 0 # Optional. The number - of seconds after which the check times out. - }, - "http_port": 0, # Optional. The internal port on - which this service's run command will listen. Default: 8080 If there - is not an environment variable with the name ``PORT``"" , one will be - automatically added with its value set to the value of this field. - "image": { - "deploy_on_push": { - "enabled": bool # Optional. Whether - to automatically deploy new images. Can only be used for - images hosted in DOCR and can only be used with an image tag, - not a specific digest. + "egress": { + "type": "AUTOASSIGN" # Optional. + Default value is "AUTOASSIGN". The app egress type. Known + values are: "AUTOASSIGN" and "DEDICATED_IP". 
}, - "digest": "str", # Optional. The image - digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The registry - name. Must be left empty for the ``DOCR`` registry type. - "registry_credentials": "str", # Optional. - The credentials to be able to pull the image. The value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. * "$username:$access_token" for - registries of type ``DOCKER_HUB``. * "$username:$access_token" - for registries of type ``GHCR``. - "registry_type": "str", # Optional. * - DOCKER_HUB: The DockerHub container registry type. * DOCR: The - DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", "DOCR", - and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default value is - "latest". The repository tag. Defaults to ``latest`` if not - provided and no digest is provided. Cannot be specified if digest - is provided. - }, - "instance_count": 1, # Optional. Default value is 1. - The amount of instances that this component should be scaled to. - Default: 1. Must not be set if autoscaling is used. - "instance_size_slug": {}, - "internal_ports": [ - 0 # Optional. The ports on which this - service will listen for internal traffic. - ], - "liveness_health_check": { - "failure_threshold": 0, # Optional. The - number of failed health checks before considered unhealthy. - "http_path": "str", # Optional. The route - path used for the HTTP health check ping. If not set, the HTTP - health check will be disabled and a TCP health check used - instead. - "initial_delay_seconds": 0, # Optional. The - number of seconds to wait before beginning health checks. - "period_seconds": 0, # Optional. The number - of seconds to wait between health checks. - "port": 0, # Optional. The port on which the - health check will be performed. - "success_threshold": 0, # Optional. 
The - number of successful health checks before considered healthy. - "timeout_seconds": 0 # Optional. The number - of seconds after which the check times out. - }, - "log_destinations": [ - { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # Optional. - Logtail token. - }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. + "enhanced_threat_control_enabled": False, # + Optional. Default value is False. If set to ``true``"" , + suspicious requests will go through additional security checks to + help mitigate layer 7 DDoS attacks. + "functions": [ + { + "name": "str", # The name. + Must be unique across all components within the same app. + Required. + "alerts": [ + { + "disabled": + bool, # Optional. Is the alert disabled?. + "operator": + "UNSPECIFIED_OPERATOR", # Optional. Default + value is "UNSPECIFIED_OPERATOR". Known values + are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and + "LESS_THAN". + "rule": + "UNSPECIFIED_RULE", # Optional. Default value is + "UNSPECIFIED_RULE". Known values are: + "UNSPECIFIED_RULE", "CPU_UTILIZATION", + "MEM_UTILIZATION", "RESTART_COUNT", + "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", + "DOMAIN_FAILED", "DOMAIN_LIVE", + "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", + "FUNCTIONS_ACTIVATION_COUNT", + "FUNCTIONS_AVERAGE_DURATION_MS", + "FUNCTIONS_ERROR_RATE_PER_MINUTE", + "FUNCTIONS_AVERAGE_WAIT_TIME_MS", + "FUNCTIONS_ERROR_COUNT", and + "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, + # Optional. Threshold value for alert. + "window": + "UNSPECIFIED_WINDOW" # Optional. 
Default value + is "UNSPECIFIED_WINDOW". Known values are: + "UNSPECIFIED_WINDOW", "FIVE_MINUTES", + "TEN_MINUTES", "THIRTY_MINUTES", and "ONE_HOUR". + } + ], + "bitbucket": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. }, - "cluster_name": "str", # - Optional. The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. Default value is "logs". The index name to use - for the logs. If not set, the default index name is - "logs". - }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. + "cors": { + "allow_credentials": + bool, # Optional. Whether browsers should expose the + response to the client-side JavaScript code when the + request"u2019s credentials mode is include. This + configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # + Optional. The set of allowed HTTP request + headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # + Optional. The set of allowed HTTP methods. This + configures the ``Access-Control-Allow-Methods`` + header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. Exact string + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "prefix": "str", # Optional. Prefix-based + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "regex": "str" # Optional. RE2 style + regex-based match. 
Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For + more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # + Optional. The set of HTTP response headers that + browsers are allowed to access. This configures + the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # + Optional. An optional duration specifying how long + browsers can cache the results of a preflight + request. This configures the + ``Access-Control-Max-Age`` header. + }, + "envs": [ + { + "key": "str", + # The variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value + is "RUN_AND_BUILD_TIME". * RUN_TIME: Made + available only at run-time * BUILD_TIME: Made + available only at build-time * + RUN_AND_BUILD_TIME: Made available at both build + and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and + "RUN_AND_BUILD_TIME". + "type": + "GENERAL", # Optional. Default value is + "GENERAL". * GENERAL: A plain-text environment + variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and + "SECRET". + "value": + "str" # Optional. The value. If the type is + ``SECRET``"" , the value will be encrypted on + first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # + Optional. The name of the branch to use. + "repo_clone_url": + "str" # Optional. The clone URL of the repo. + Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. 
Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "log_destinations": [ + { + "name": + "str", # Required. + "datadog": { + "api_key": "str", # Datadog API key. + Required. + "endpoint": "str" # Optional. Datadog HTTP + log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # Optional. Password + for user defined in User. Is required + when ``endpoint`` is set. Cannot be set + if using a DigitalOcean DBaaS OpenSearch + cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name + of a DigitalOcean DBaaS OpenSearch cluster to + use as a log forwarding destination. Cannot + be specified if ``endpoint`` is also + specified. + "endpoint": "str", # Optional. OpenSearch + API Endpoint. Only HTTPS is supported. + Format: + https://:code:``::code:``. Cannot + be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # Optional. Default + value is "logs". The index name to use for + the logs. If not set, the default index name + is "logs". + }, + "papertrail": + { + "endpoint": "str" # Papertrail syslog + endpoint. Required. + } + } + ], + "routes": [ + { + "path": + "str", # Optional. (Deprecated - Use Ingress + Rules instead). An HTTP path prefix. Paths must + start with / and must be unique across all + components within an app. + "preserve_path_prefix": bool # Optional. An + optional flag to preserve the path that is + forwarded to the backend service. By default, the + HTTP request path will be trimmed from the left + when forwarded to the component. For example, a + component with ``path=/api`` will have requests + to ``/api/list`` trimmed to ``/list``. 
If this + value is ``true``"" , the path will remain + ``/api/list``. + } + ], + "source_dir": "str" # + Optional. An optional path to the working directory to + use for the build. For Dockerfile builds, this will be + used as the build context. Must be relative to the root + of the repo. } - } - ], - "name": "str", # Optional. The name. Must be unique - across all components within the same app. - "protocol": "str", # Optional. The protocol which - the service uses to serve traffic on the http_port. * ``HTTP``"" : - The app is serving the HTTP protocol. Default. * ``HTTP2``"" : The - app is serving the HTTP/2 protocol. Currently, this needs to be - implemented in the service by serving HTTP/2 cleartext (h2c). Known - values are: "HTTP" and "HTTP2". - "routes": [ - { - "path": "str", # Optional. - (Deprecated - Use Ingress Rules instead). An HTTP path - prefix. Paths must start with / and must be unique across all - components within an app. - "preserve_path_prefix": bool # - Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded to - the component. For example, a component with ``path=/api`` - will have requests to ``/api/list`` trimmed to ``/list``. If - this value is ``true``"" , the path will remain - ``/api/list``. - } - ], - "run_command": "str", # Optional. An optional run - command to override the component's default. - "source_dir": "str", # Optional. An optional path to - the working directory to use for the build. For Dockerfile builds, - this will be used as the build context. Must be relative to the root - of the repo. - "termination": { - "drain_seconds": 0, # Optional. The number - of seconds to wait between selecting a container instance for - termination and issuing the TERM signal. Selecting a container - instance for termination begins an asynchronous drain of new - requests on upstream load-balancers. (Default 15). 
- "grace_period_seconds": 0 # Optional. The - number of seconds to wait between sending a TERM signal to a - container and issuing a KILL which causes immediate shutdown. - (Default 120). - } - } - ], - "static_sites": [ - { - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "build_command": "str", # Optional. An optional - build command to run while building this component from source. - "catchall_document": "str", # Optional. The name of - the document to use as the fallback for any requests to documents - that are not found when serving this static site. Only 1 of - ``catchall_document`` or ``error_document`` can be set. - "cors": { - "allow_credentials": bool, # Optional. - Whether browsers should expose the response to the client-side - JavaScript code when the request"u2019s credentials mode is - include. This configures the ``Access-Control-Allow-Credentials`` - header. - "allow_headers": [ - "str" # Optional. The set of allowed - HTTP request headers. This configures the - ``Access-Control-Allow-Headers`` header. - ], - "allow_methods": [ - "str" # Optional. The set of allowed - HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. ], - "allow_origins": [ + "ingress": { + "rules": [ + { + "component": { + "name": + "str", # The name of the component to route to. + Required. + "preserve_path_prefix": "str", # Optional. An + optional flag to preserve the path that is + forwarded to the backend service. By default, the + HTTP request path will be trimmed from the left + when forwarded to the component. For example, a + component with ``path=/api`` will have requests + to ``/api/list`` trimmed to ``/list``. If this + value is ``true``"" , the path will remain + ``/api/list``. 
Note: this is not applicable for + Functions Components and is mutually exclusive + with ``rewrite``. + "rewrite": + "str" # Optional. An optional field that will + rewrite the path of the component to be what is + specified here. By default, the HTTP request path + will be trimmed from the left when forwarded to + the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If you specified the + rewrite to be ``/v1/``"" , requests to + ``/api/list`` would be rewritten to ``/v1/list``. + Note: this is mutually exclusive with + ``preserve_path_prefix``. + }, + "cors": { + "allow_credentials": bool, # Optional. Whether + browsers should expose the response to the + client-side JavaScript code when the + request"u2019s credentials mode is include. This + configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" + # Optional. The set of allowed HTTP request + headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" + # Optional. The set of allowed HTTP methods. + This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. Exact string + match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "prefix": "str", # Optional. + Prefix-based match. Only 1 of ``exact``"" + , ``prefix``"" , or ``regex`` must be + set. + "regex": "str" # Optional. RE2 style + regex-based match. Only 1 of ``exact``"" + , ``prefix``"" , or ``regex`` must be + set. For more information about RE2 + syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" + # Optional. The set of HTTP response headers + that browsers are allowed to access. This + configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": + "str" # Optional. An optional duration + specifying how long browsers can cache the + results of a preflight request. 
This configures + the ``Access-Control-Max-Age`` header. + }, + "match": { + "authority": + { + "exact": "str" # Required. + }, + "path": { + "prefix": "str" # Prefix-based match. For + example, ``/api`` will match ``/api``"" , + ``/api/``"" , and any nested paths such as + ``/api/v1/endpoint``. Required. + } + }, + "redirect": { + "authority": + "str", # Optional. The authority/host to + redirect to. This can be a hostname or IP + address. Note: use ``port`` to set the port. + "port": 0, # + Optional. The port to redirect to. + "redirect_code": 0, # Optional. The redirect + code to use. Defaults to ``302``. Supported + values are 300, 301, 302, 303, 304, 307, 308. + "scheme": + "str", # Optional. The scheme to redirect to. + Supported values are ``http`` or ``https``. + Default: ``https``. + "uri": "str" + # Optional. An optional URI path to redirect to. + Note: if this is specified the whole URI of the + original request will be overwritten to this + value, irrespective of the original request URI + being matched. + } + } + ] + }, + "jobs": [ { - "exact": "str", # Optional. - Exact string match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "prefix": "str", # Optional. - Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "regex": "str" # Optional. - RE2 style regex-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. For more - information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. + "autoscaling": { + "max_instance_count": + 0, # Optional. The maximum amount of instances for + this component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # Optional. Default value is + 80. The average target CPU utilization for + the component. + } + }, + "min_instance_count": + 0 # Optional. The minimum amount of instances for + this component. Must be less than max_instance_count. 
+ }, + "bitbucket": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "build_command": "str", # + Optional. An optional build command to run while building + this component from source. + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root + of the repo. If set, it will be used to build this + component. Otherwise, App Platform will attempt to build + it using buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", + # The variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value + is "RUN_AND_BUILD_TIME". * RUN_TIME: Made + available only at run-time * BUILD_TIME: Made + available only at build-time * + RUN_AND_BUILD_TIME: Made available at both build + and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and + "RUN_AND_BUILD_TIME". + "type": + "GENERAL", # Optional. Default value is + "GENERAL". * GENERAL: A plain-text environment + variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and + "SECRET". + "value": + "str" # Optional. The value. If the type is + ``SECRET``"" , the value will be encrypted on + first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # + Optional. The name of the branch to use. + "repo_clone_url": + "str" # Optional. The clone URL of the repo. + Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. 
Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": + bool # Optional. Whether to automatically deploy + new images. Can only be used for images hosted in + DOCR and can only be used with an image tag, not + a specific digest. + }, + "digest": "str", # + Optional. The image digest. Cannot be specified if + tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for + the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. The + credentials to be able to pull the image. The value + will be encrypted on first submission. On following + submissions, the encrypted value should be used. * + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for + registries of type ``GHCR``. + "registry_type": + "str", # Optional. * DOCKER_HUB: The DockerHub + container registry type. * DOCR: The DigitalOcean + container registry type. * GHCR: The Github container + registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", + # Optional. The repository name. + "tag": "latest" # + Optional. Default value is "latest". The repository + tag. Defaults to ``latest`` if not provided and no + digest is provided. Cannot be specified if digest is + provided. + }, + "instance_count": 1, # + Optional. Default value is 1. The amount of instances + that this component should be scaled to. Default: 1. Must + not be set if autoscaling is used. 
+ "instance_size_slug": {}, + "kind": "UNSPECIFIED", # + Optional. Default value is "UNSPECIFIED". * UNSPECIFIED: + Default job type, will auto-complete to POST_DEPLOY kind. + * PRE_DEPLOY: Indicates a job that runs before an app + deployment. * POST_DEPLOY: Indicates a job that runs + after an app deployment. * FAILED_DEPLOY: Indicates a job + that runs after a component fails to deploy. Known values + are: "UNSPECIFIED", "PRE_DEPLOY", "POST_DEPLOY", and + "FAILED_DEPLOY". + "log_destinations": [ + { + "name": + "str", # Required. + "datadog": { + "api_key": "str", # Datadog API key. + Required. + "endpoint": "str" # Optional. Datadog HTTP + log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # Optional. Password + for user defined in User. Is required + when ``endpoint`` is set. Cannot be set + if using a DigitalOcean DBaaS OpenSearch + cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name + of a DigitalOcean DBaaS OpenSearch cluster to + use as a log forwarding destination. Cannot + be specified if ``endpoint`` is also + specified. + "endpoint": "str", # Optional. OpenSearch + API Endpoint. Only HTTPS is supported. + Format: + https://:code:``::code:``. Cannot + be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # Optional. Default + value is "logs". The index name to use for + the logs. If not set, the default index name + is "logs". + }, + "papertrail": + { + "endpoint": "str" # Papertrail syslog + endpoint. Required. + } + } + ], + "name": "str", # Optional. + The name. Must be unique across all components within the + same app. + "run_command": "str", # + Optional. An optional run command to override the + component's default. + "source_dir": "str", # + Optional. 
An optional path to the working directory to + use for the build. For Dockerfile builds, this will be + used as the build context. Must be relative to the root + of the repo. + "termination": { + "grace_period_seconds": 0 # Optional. The number of + seconds to wait between sending a TERM signal to a + container and issuing a KILL which causes immediate + shutdown. (Default 120). + } } ], - "expose_headers": [ - "str" # Optional. The set of HTTP - response headers that browsers are allowed to access. This - configures the ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. An optional - duration specifying how long browsers can cache the results of a - preflight request. This configures the ``Access-Control-Max-Age`` - header. - }, - "dockerfile_path": "str", # Optional. The path to - the Dockerfile relative to the root of the repo. If set, it will be - used to build this component. Otherwise, App Platform will attempt to - build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full list, - please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable name. - Required. - "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. - } - ], - "error_document": "404.html", # Optional. 
Default - value is "404.html". The name of the error document to use when - serving this static site. Default: 404.html. If no such file exists - within the built assets, App Platform will supply one. - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "image": { - "deploy_on_push": { - "enabled": bool # Optional. Whether - to automatically deploy new images. Can only be used for - images hosted in DOCR and can only be used with an image tag, - not a specific digest. - }, - "digest": "str", # Optional. The image - digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The registry - name. Must be left empty for the ``DOCR`` registry type. - "registry_credentials": "str", # Optional. - The credentials to be able to pull the image. The value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. * "$username:$access_token" for - registries of type ``DOCKER_HUB``. * "$username:$access_token" - for registries of type ``GHCR``. - "registry_type": "str", # Optional. * - DOCKER_HUB: The DockerHub container registry type. * DOCR: The - DigitalOcean container registry type. * GHCR: The Github - container registry type. 
Known values are: "DOCKER_HUB", "DOCR", - and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default value is - "latest". The repository tag. Defaults to ``latest`` if not - provided and no digest is provided. Cannot be specified if digest - is provided. - }, - "index_document": "index.html", # Optional. Default - value is "index.html". The name of the index document to use when - serving this static site. Default: index.html. - "log_destinations": [ - { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # Optional. - Logtail token. - }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. + "maintenance": { + "archive": bool, # Optional. + Indicates whether the app should be archived. Setting this to + true implies that enabled is set to true. + "enabled": bool, # Optional. + Indicates whether maintenance mode should be enabled for the + app. + "offline_page_url": "str" # + Optional. A custom offline page to display when maintenance + mode is enabled or the app is archived. + }, + "region": "str", # Optional. The slug form + of the geographical origin of the app. Default: ``nearest + available``. Known values are: "atl", "nyc", "sfo", "tor", "ams", + "fra", "lon", "blr", "sgp", and "syd". + "services": [ + { + "autoscaling": { + "max_instance_count": + 0, # Optional. The maximum amount of instances for + this component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # Optional. Default value is + 80. 
The average target CPU utilization for + the component. + } + }, + "min_instance_count": + 0 # Optional. The minimum amount of instances for + this component. Must be less than max_instance_count. }, - "cluster_name": "str", # - Optional. The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. Default value is "logs". The index name to use - for the logs. If not set, the default index name is - "logs". - }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. + "bitbucket": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "build_command": "str", # + Optional. An optional build command to run while building + this component from source. + "cors": { + "allow_credentials": + bool, # Optional. Whether browsers should expose the + response to the client-side JavaScript code when the + request"u2019s credentials mode is include. This + configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # + Optional. The set of allowed HTTP request + headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # + Optional. The set of allowed HTTP methods. This + configures the ``Access-Control-Allow-Methods`` + header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. Exact string + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "prefix": "str", # Optional. Prefix-based + match. 
Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "regex": "str" # Optional. RE2 style + regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For + more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # + Optional. The set of HTTP response headers that + browsers are allowed to access. This configures + the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # + Optional. An optional duration specifying how long + browsers can cache the results of a preflight + request. This configures the + ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root + of the repo. If set, it will be used to build this + component. Otherwise, App Platform will attempt to build + it using buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", + # The variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value + is "RUN_AND_BUILD_TIME". * RUN_TIME: Made + available only at run-time * BUILD_TIME: Made + available only at build-time * + RUN_AND_BUILD_TIME: Made available at both build + and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and + "RUN_AND_BUILD_TIME". + "type": + "GENERAL", # Optional. Default value is + "GENERAL". * GENERAL: A plain-text environment + variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and + "SECRET". + "value": + "str" # Optional. The value. If the type is + ``SECRET``"" , the value will be encrypted on + first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # + Optional. The name of the branch to use. + "repo_clone_url": + "str" # Optional. 
The clone URL of the repo. + Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "health_check": { + "failure_threshold": + 0, # Optional. The number of failed health checks + before considered unhealthy. + "http_path": "str", + # Optional. The route path used for the HTTP health + check ping. If not set, the HTTP health check will be + disabled and a TCP health check used instead. + "initial_delay_seconds": 0, # Optional. The number + of seconds to wait before beginning health checks. + "period_seconds": 0, + # Optional. The number of seconds to wait between + health checks. + "port": 0, # + Optional. The port on which the health check will be + performed. If not set, the health check will be + performed on the component's http_port. + "success_threshold": + 0, # Optional. The number of successful health + checks before considered healthy. + "timeout_seconds": 0 + # Optional. The number of seconds after which the + check times out. + }, + "http_port": 0, # Optional. + The internal port on which this service's run command + will listen. Default: 8080 If there is not an environment + variable with the name ``PORT``"" , one will be + automatically added with its value set to the value of + this field. + "image": { + "deploy_on_push": { + "enabled": + bool # Optional. Whether to automatically deploy + new images. 
Can only be used for images hosted in + DOCR and can only be used with an image tag, not + a specific digest. + }, + "digest": "str", # + Optional. The image digest. Cannot be specified if + tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for + the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. The + credentials to be able to pull the image. The value + will be encrypted on first submission. On following + submissions, the encrypted value should be used. * + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for + registries of type ``GHCR``. + "registry_type": + "str", # Optional. * DOCKER_HUB: The DockerHub + container registry type. * DOCR: The DigitalOcean + container registry type. * GHCR: The Github container + registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", + # Optional. The repository name. + "tag": "latest" # + Optional. Default value is "latest". The repository + tag. Defaults to ``latest`` if not provided and no + digest is provided. Cannot be specified if digest is + provided. + }, + "instance_count": 1, # + Optional. Default value is 1. The amount of instances + that this component should be scaled to. Default: 1. Must + not be set if autoscaling is used. + "instance_size_slug": {}, + "internal_ports": [ + 0 # Optional. The + ports on which this service will listen for internal + traffic. + ], + "liveness_health_check": { + "failure_threshold": + 0, # Optional. The number of failed health checks + before considered unhealthy. + "http_path": "str", + # Optional. The route path used for the HTTP health + check ping. If not set, the HTTP health check will be + disabled and a TCP health check used instead. + "initial_delay_seconds": 0, # Optional. The number + of seconds to wait before beginning health checks. + "period_seconds": 0, + # Optional. The number of seconds to wait between + health checks. 
+ "port": 0, # + Optional. The port on which the health check will be + performed. + "success_threshold": + 0, # Optional. The number of successful health + checks before considered healthy. + "timeout_seconds": 0 + # Optional. The number of seconds after which the + check times out. + }, + "log_destinations": [ + { + "name": + "str", # Required. + "datadog": { + "api_key": "str", # Datadog API key. + Required. + "endpoint": "str" # Optional. Datadog HTTP + log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # Optional. Password + for user defined in User. Is required + when ``endpoint`` is set. Cannot be set + if using a DigitalOcean DBaaS OpenSearch + cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name + of a DigitalOcean DBaaS OpenSearch cluster to + use as a log forwarding destination. Cannot + be specified if ``endpoint`` is also + specified. + "endpoint": "str", # Optional. OpenSearch + API Endpoint. Only HTTPS is supported. + Format: + https://:code:``::code:``. Cannot + be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # Optional. Default + value is "logs". The index name to use for + the logs. If not set, the default index name + is "logs". + }, + "papertrail": + { + "endpoint": "str" # Papertrail syslog + endpoint. Required. + } + } + ], + "name": "str", # Optional. + The name. Must be unique across all components within the + same app. + "protocol": "str", # + Optional. The protocol which the service uses to serve + traffic on the http_port. * ``HTTP``"" : The app is + serving the HTTP protocol. Default. * ``HTTP2``"" : The + app is serving the HTTP/2 protocol. Currently, this needs + to be implemented in the service by serving HTTP/2 + cleartext (h2c). 
Known values are: "HTTP" and "HTTP2". + "routes": [ + { + "path": + "str", # Optional. (Deprecated - Use Ingress + Rules instead). An HTTP path prefix. Paths must + start with / and must be unique across all + components within an app. + "preserve_path_prefix": bool # Optional. An + optional flag to preserve the path that is + forwarded to the backend service. By default, the + HTTP request path will be trimmed from the left + when forwarded to the component. For example, a + component with ``path=/api`` will have requests + to ``/api/list`` trimmed to ``/list``. If this + value is ``true``"" , the path will remain + ``/api/list``. + } + ], + "run_command": "str", # + Optional. An optional run command to override the + component's default. + "source_dir": "str", # + Optional. An optional path to the working directory to + use for the build. For Dockerfile builds, this will be + used as the build context. Must be relative to the root + of the repo. + "termination": { + "drain_seconds": 0, + # Optional. The number of seconds to wait between + selecting a container instance for termination and + issuing the TERM signal. Selecting a container + instance for termination begins an asynchronous drain + of new requests on upstream load-balancers. (Default + 15). + "grace_period_seconds": 0 # Optional. The number of + seconds to wait between sending a TERM signal to a + container and issuing a KILL which causes immediate + shutdown. (Default 120). + } } - } - ], - "name": "str", # Optional. The name. Must be unique - across all components within the same app. - "output_dir": "str", # Optional. An optional path to - where the built assets will be located, relative to the build - context. If not set, App Platform will automatically scan for these - directory names: ``_static``"" , ``dist``"" , ``public``"" , - ``build``. - "routes": [ - { - "path": "str", # Optional. - (Deprecated - Use Ingress Rules instead). An HTTP path - prefix. 
Paths must start with / and must be unique across all - components within an app. - "preserve_path_prefix": bool # - Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded to - the component. For example, a component with ``path=/api`` - will have requests to ``/api/list`` trimmed to ``/list``. If - this value is ``true``"" , the path will remain - ``/api/list``. - } - ], - "run_command": "str", # Optional. An optional run - command to override the component's default. - "source_dir": "str" # Optional. An optional path to - the working directory to use for the build. For Dockerfile builds, - this will be used as the build context. Must be relative to the root - of the repo. - } - ], - "vpc": { - "egress_ips": [ - { - "ip": "str" # Optional. The egress ips - associated with the VPC. - } - ], - "id": "str" # Optional. The ID of the VPC. - }, - "workers": [ - { - "autoscaling": { - "max_instance_count": 0, # Optional. The - maximum amount of instances for this component. Must be more than - min_instance_count. - "metrics": { - "cpu": { - "percent": 80 # Optional. - Default value is 80. The average target CPU utilization - for the component. + ], + "static_sites": [ + { + "bitbucket": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "build_command": "str", # + Optional. An optional build command to run while building + this component from source. + "catchall_document": "str", + # Optional. The name of the document to use as the + fallback for any requests to documents that are not found + when serving this static site. Only 1 of + ``catchall_document`` or ``error_document`` can be set. 
+ "cors": { + "allow_credentials": + bool, # Optional. Whether browsers should expose the + response to the client-side JavaScript code when the + request"u2019s credentials mode is include. This + configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # + Optional. The set of allowed HTTP request + headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # + Optional. The set of allowed HTTP methods. This + configures the ``Access-Control-Allow-Methods`` + header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. Exact string + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "prefix": "str", # Optional. Prefix-based + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "regex": "str" # Optional. RE2 style + regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For + more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # + Optional. The set of HTTP response headers that + browsers are allowed to access. This configures + the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # + Optional. An optional duration specifying how long + browsers can cache the results of a preflight + request. This configures the + ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root + of the repo. If set, it will be used to build this + component. Otherwise, App Platform will attempt to build + it using buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", + # The variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value + is "RUN_AND_BUILD_TIME". 
* RUN_TIME: Made + available only at run-time * BUILD_TIME: Made + available only at build-time * + RUN_AND_BUILD_TIME: Made available at both build + and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and + "RUN_AND_BUILD_TIME". + "type": + "GENERAL", # Optional. Default value is + "GENERAL". * GENERAL: A plain-text environment + variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and + "SECRET". + "value": + "str" # Optional. The value. If the type is + ``SECRET``"" , the value will be encrypted on + first submission. On following submissions, the + encrypted value should be used. + } + ], + "error_document": "404.html", + # Optional. Default value is "404.html". The name of the + error document to use when serving this static site. + Default: 404.html. If no such file exists within the + built assets, App Platform will supply one. + "git": { + "branch": "str", # + Optional. The name of the branch to use. + "repo_clone_url": + "str" # Optional. The clone URL of the repo. + Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": + bool # Optional. Whether to automatically deploy + new images. Can only be used for images hosted in + DOCR and can only be used with an image tag, not + a specific digest. + }, + "digest": "str", # + Optional. 
The image digest. Cannot be specified if + tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for + the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. The + credentials to be able to pull the image. The value + will be encrypted on first submission. On following + submissions, the encrypted value should be used. * + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for + registries of type ``GHCR``. + "registry_type": + "str", # Optional. * DOCKER_HUB: The DockerHub + container registry type. * DOCR: The DigitalOcean + container registry type. * GHCR: The Github container + registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", + # Optional. The repository name. + "tag": "latest" # + Optional. Default value is "latest". The repository + tag. Defaults to ``latest`` if not provided and no + digest is provided. Cannot be specified if digest is + provided. + }, + "index_document": + "index.html", # Optional. Default value is "index.html". + The name of the index document to use when serving this + static site. Default: index.html. + "log_destinations": [ + { + "name": + "str", # Required. + "datadog": { + "api_key": "str", # Datadog API key. + Required. + "endpoint": "str" # Optional. Datadog HTTP + log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # Optional. Password + for user defined in User. Is required + when ``endpoint`` is set. Cannot be set + if using a DigitalOcean DBaaS OpenSearch + cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name + of a DigitalOcean DBaaS OpenSearch cluster to + use as a log forwarding destination. 
Cannot + be specified if ``endpoint`` is also + specified. + "endpoint": "str", # Optional. OpenSearch + API Endpoint. Only HTTPS is supported. + Format: + https://:code:``::code:``. Cannot + be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # Optional. Default + value is "logs". The index name to use for + the logs. If not set, the default index name + is "logs". + }, + "papertrail": + { + "endpoint": "str" # Papertrail syslog + endpoint. Required. + } + } + ], + "name": "str", # Optional. + The name. Must be unique across all components within the + same app. + "output_dir": "str", # + Optional. An optional path to where the built assets will + be located, relative to the build context. If not set, + App Platform will automatically scan for these directory + names: ``_static``"" , ``dist``"" , ``public``"" , + ``build``. + "routes": [ + { + "path": + "str", # Optional. (Deprecated - Use Ingress + Rules instead). An HTTP path prefix. Paths must + start with / and must be unique across all + components within an app. + "preserve_path_prefix": bool # Optional. An + optional flag to preserve the path that is + forwarded to the backend service. By default, the + HTTP request path will be trimmed from the left + when forwarded to the component. For example, a + component with ``path=/api`` will have requests + to ``/api/list`` trimmed to ``/list``. If this + value is ``true``"" , the path will remain + ``/api/list``. + } + ], + "run_command": "str", # + Optional. An optional run command to override the + component's default. + "source_dir": "str" # + Optional. An optional path to the working directory to + use for the build. For Dockerfile builds, this will be + used as the build context. Must be relative to the root + of the repo. } + ], + "vpc": { + "egress_ips": [ + { + "ip": "str" # + Optional. The egress ips associated with the VPC. + } + ], + "id": "str" # Optional. The ID of + the VPC. }, - "min_instance_count": 0 # Optional. 
The - minimum amount of instances for this component. Must be less than - max_instance_count. - }, - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. + "workers": [ + { + "autoscaling": { + "max_instance_count": + 0, # Optional. The maximum amount of instances for + this component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # Optional. Default value is + 80. The average target CPU utilization for + the component. + } + }, + "min_instance_count": + 0 # Optional. The minimum amount of instances for + this component. Must be less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "build_command": "str", # + Optional. An optional build command to run while building + this component from source. + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root + of the repo. If set, it will be used to build this + component. Otherwise, App Platform will attempt to build + it using buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", + # The variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value + is "RUN_AND_BUILD_TIME". * RUN_TIME: Made + available only at run-time * BUILD_TIME: Made + available only at build-time * + RUN_AND_BUILD_TIME: Made available at both build + and run-time. 
Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and + "RUN_AND_BUILD_TIME". + "type": + "GENERAL", # Optional. Default value is + "GENERAL". * GENERAL: A plain-text environment + variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and + "SECRET". + "value": + "str" # Optional. The value. If the type is + ``SECRET``"" , the value will be encrypted on + first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # + Optional. The name of the branch to use. + "repo_clone_url": + "str" # Optional. The clone URL of the repo. + Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # + Optional. The name of the branch to use. + "deploy_on_push": + bool, # Optional. Whether to automatically deploy + new commits made to the repo. + "repo": "str" # + Optional. The name of the repo in the format + owner/repo. Example: ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": + bool # Optional. Whether to automatically deploy + new images. Can only be used for images hosted in + DOCR and can only be used with an image tag, not + a specific digest. + }, + "digest": "str", # + Optional. The image digest. Cannot be specified if + tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for + the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. The + credentials to be able to pull the image. The value + will be encrypted on first submission. On following + submissions, the encrypted value should be used. 
* + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for + registries of type ``GHCR``. + "registry_type": + "str", # Optional. * DOCKER_HUB: The DockerHub + container registry type. * DOCR: The DigitalOcean + container registry type. * GHCR: The Github container + registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", + # Optional. The repository name. + "tag": "latest" # + Optional. Default value is "latest". The repository + tag. Defaults to ``latest`` if not provided and no + digest is provided. Cannot be specified if digest is + provided. + }, + "instance_count": 1, # + Optional. Default value is 1. The amount of instances + that this component should be scaled to. Default: 1. Must + not be set if autoscaling is used. + "instance_size_slug": {}, + "liveness_health_check": { + "failure_threshold": + 0, # Optional. The number of failed health checks + before considered unhealthy. + "http_path": "str", + # Optional. The route path used for the HTTP health + check ping. If not set, the HTTP health check will be + disabled and a TCP health check used instead. + "initial_delay_seconds": 0, # Optional. The number + of seconds to wait before beginning health checks. + "period_seconds": 0, + # Optional. The number of seconds to wait between + health checks. + "port": 0, # + Optional. The port on which the health check will be + performed. + "success_threshold": + 0, # Optional. The number of successful health + checks before considered healthy. + "timeout_seconds": 0 + # Optional. The number of seconds after which the + check times out. + }, + "log_destinations": [ + { + "name": + "str", # Required. + "datadog": { + "api_key": "str", # Datadog API key. + Required. + "endpoint": "str" # Optional. Datadog HTTP + log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # Optional. 
Password + for user defined in User. Is required + when ``endpoint`` is set. Cannot be set + if using a DigitalOcean DBaaS OpenSearch + cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name + of a DigitalOcean DBaaS OpenSearch cluster to + use as a log forwarding destination. Cannot + be specified if ``endpoint`` is also + specified. + "endpoint": "str", # Optional. OpenSearch + API Endpoint. Only HTTPS is supported. + Format: + https://:code:``::code:``. Cannot + be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # Optional. Default + value is "logs". The index name to use for + the logs. If not set, the default index name + is "logs". + }, + "papertrail": + { + "endpoint": "str" # Papertrail syslog + endpoint. Required. + } + } + ], + "name": "str", # Optional. + The name. Must be unique across all components within the + same app. + "run_command": "str", # + Optional. An optional run command to override the + component's default. + "source_dir": "str", # + Optional. An optional path to the working directory to + use for the build. For Dockerfile builds, this will be + used as the build context. Must be relative to the root + of the repo. + "termination": { + "grace_period_seconds": 0 # Optional. The number of + seconds to wait between sending a TERM signal to a + container and issuing a KILL which causes immediate + shutdown. (Default 120). + } + } + ] }, - "build_command": "str", # Optional. An optional - build command to run while building this component from source. - "dockerfile_path": "str", # Optional. The path to - the Dockerfile relative to the root of the repo. If set, it will be - used to build this component. Otherwise, App Platform will attempt to - build it using buildpacks. - "environment_slug": "str", # Optional. 
An - environment slug describing the type of this app. For a full list, - please refer to `the product documentation - `_. - "envs": [ + "static_sites": [ { - "key": "str", # The variable name. - Required. - "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. + "name": "str", # Optional. The name + of this static site. + "source_commit_hash": "str" # + Optional. The commit hash of the repository that was used to + build this static site. } ], - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. 
- }, - "image": { - "deploy_on_push": { - "enabled": bool # Optional. Whether - to automatically deploy new images. Can only be used for - images hosted in DOCR and can only be used with an image tag, - not a specific digest. - }, - "digest": "str", # Optional. The image - digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The registry - name. Must be left empty for the ``DOCR`` registry type. - "registry_credentials": "str", # Optional. - The credentials to be able to pull the image. The value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. * "$username:$access_token" for - registries of type ``DOCKER_HUB``. * "$username:$access_token" - for registries of type ``GHCR``. - "registry_type": "str", # Optional. * - DOCKER_HUB: The DockerHub container registry type. * DOCR: The - DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", "DOCR", - and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default value is - "latest". The repository tag. Defaults to ``latest`` if not - provided and no digest is provided. Cannot be specified if digest - is provided. - }, - "instance_count": 1, # Optional. Default value is 1. - The amount of instances that this component should be scaled to. - Default: 1. Must not be set if autoscaling is used. - "instance_size_slug": {}, - "liveness_health_check": { - "failure_threshold": 0, # Optional. The - number of failed health checks before considered unhealthy. - "http_path": "str", # Optional. The route - path used for the HTTP health check ping. If not set, the HTTP - health check will be disabled and a TCP health check used - instead. - "initial_delay_seconds": 0, # Optional. The - number of seconds to wait before beginning health checks. - "period_seconds": 0, # Optional. The number - of seconds to wait between health checks. 
- "port": 0, # Optional. The port on which the - health check will be performed. - "success_threshold": 0, # Optional. The - number of successful health checks before considered healthy. - "timeout_seconds": 0 # Optional. The number - of seconds after which the check times out. - }, - "log_destinations": [ + "tier_slug": "str", # Optional. The current pricing + tier slug of the deployment. + "updated_at": "2020-02-20 00:00:00", # Optional. + When the deployment was last updated. + "workers": [ { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # Optional. - Logtail token. - }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": "str", # - Optional. The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. Default value is "logs". The index name to use - for the logs. If not set, the default index name is - "logs". - }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } + "name": "str", # Optional. The name + of this worker. + "source_commit_hash": "str" # + Optional. The commit hash of the repository that was used to + build this worker. } - ], - "name": "str", # Optional. The name. 
Must be unique - across all components within the same app. - "run_command": "str", # Optional. An optional run - command to override the component's default. - "source_dir": "str", # Optional. An optional path to - the working directory to use for the build. For Dockerfile builds, - this will be used as the build context. Must be relative to the root - of the repo. - "termination": { - "grace_period_seconds": 0 # Optional. The - number of seconds to wait between sending a TERM signal to a - container and issuing a KILL which causes immediate shutdown. - (Default 120). - } - } - ] - }, - "app_id": "str" # Optional. An optional ID of an existing app. If set, the - spec will be treated as a proposed update to the specified app. The existing app - is not modified using this method. + ] + }, + "deployment_id": "str", # Optional. For deployment events, + this is the same as the deployment's ID. For autoscaling events, this is + the deployment that was autoscaled. + "id": "str", # Optional. The ID of the event (UUID). + "type": "str" # Optional. The type of event. Known values + are: "UNKNOWN", "DEPLOYMENT", and "AUTOSCALING". + } + ], + "links": { + "pages": {} + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
} + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_apps_list_events_request( + app_id=app_id, + page=page, + per_page=per_page, + event_types=event_types, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + 
response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def get_event(self, app_id: str, event_id: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Get an Event. + + Get a single event for an app. + + :param app_id: The app ID. Required. + :type app_id: str + :param event_id: The event ID. Required. + :type event_id: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python # response body for status code(s): 200 response == { - "app_cost": 0, # Optional. The monthly cost of the proposed app in USD. - "app_is_static": bool, # Optional. Indicates whether the app is a static - app. - "app_name_available": bool, # Optional. Indicates whether the app name is - available. - "app_name_suggestion": "str", # Optional. The suggested name if the proposed - app name is unavailable. - "app_tier_downgrade_cost": 0, # Optional. The monthly cost of the proposed - app in USD using the previous pricing plan tier. For example, if you propose an - app that uses the Professional tier, the ``app_tier_downgrade_cost`` field - displays the monthly cost of the app if it were to use the Basic tier. If the - proposed app already uses the lest expensive tier, the field is empty. - "existing_static_apps": "str", # Optional. The maximum number of free static - apps the account can have. We will charge you for any additional static apps. - "spec": { - "name": "str", # The name of the app. Must be unique across all apps - in the same account. Required. - "databases": [ - { - "name": "str", # The database's name. 
The name must - be unique across all components within the same app and cannot use - capital letters. Required. - "cluster_name": "str", # Optional. The name of the - underlying DigitalOcean DBaaS cluster. This is required for - production databases. For dev databases, if cluster_name is not set, - a new cluster will be provisioned. - "db_name": "str", # Optional. The name of the MySQL - or PostgreSQL database to configure. - "db_user": "str", # Optional. The name of the MySQL - or PostgreSQL user to configure. - "engine": "UNSET", # Optional. Default value is - "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * MONGODB: - MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey. - Known values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", - "KAFKA", "OPENSEARCH", and "VALKEY". - "production": bool, # Optional. Whether this is a - production or dev database. - "version": "str" # Optional. The version of the - database engine. - } - ], - "disable_edge_cache": False, # Optional. Default value is False. .. - role:: raw-html-m2r(raw) :format: html If set to ``true``"" , the app - will **not** be cached at the edge (CDN). Enable this option if you want to - manage CDN configuration yourself"u2014whether by using an external CDN - provider or by handling static content and caching within your app. This - setting is also recommended for apps that require real-time data or serve - dynamic content, such as those using Server-Sent Events (SSE) over GET, or - hosting an MCP (Model Context Protocol) Server that utilizes SSE."" - :raw-html-m2r:`
` **Note:** This feature is not available for static site - components."" :raw-html-m2r:`
` For more information, see `Disable CDN - Cache - `_. - "disable_email_obfuscation": False, # Optional. Default value is - False. If set to ``true``"" , email addresses in the app will not be - obfuscated. This is useful for apps that require email addresses to be - visible (in the HTML markup). - "domains": [ - { - "domain": "str", # The hostname for the domain. - Required. - "minimum_tls_version": "str", # Optional. The - minimum version of TLS a client application can use to access - resources for the domain. Must be one of the following values - wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values are: - "1.2" and "1.3". - "type": "UNSPECIFIED", # Optional. Default value is - "UNSPECIFIED". * DEFAULT: The default ``.ondigitalocean.app`` domain - assigned to this app * PRIMARY: The primary domain for this app that - is displayed as the default in the control panel, used in bindable - environment variables, and any other places that reference an app's - live URL. Only one domain may be set as primary. * ALIAS: A - non-primary domain. Known values are: "UNSPECIFIED", "DEFAULT", - "PRIMARY", and "ALIAS". - "wildcard": bool, # Optional. Indicates whether the - domain includes all sub-domains, in addition to the given domain. - "zone": "str" # Optional. Optional. If the domain - uses DigitalOcean DNS and you would like App Platform to - automatically manage it for you, set this to the name of the domain - on your account. For example, If the domain you are adding is - ``app.domain.com``"" , the zone could be ``domain.com``. - } - ], - "egress": { - "type": "AUTOASSIGN" # Optional. Default value is - "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and - "DEDICATED_IP". + "event": { + "autoscaling": { + "components": { + "str": { + "from": 0, # Optional. The number of + replicas before scaling. + "to": 0, # Optional. The number of replicas + after scaling. + "triggering_metric": "str" # Optional. 
The + metric that triggered the scale change. Known values are "cpu", + "requests_per_second", "request_duration". For inactivity sleep, + "scale_from_zero" and "scale_to_zero" are used. + } + }, + "phase": "str" # Optional. The current phase of the + autoscaling event. Known values are: "UNKNOWN", "PENDING", "IN_PROGRESS", + "SUCCEEDED", "FAILED", and "CANCELED". }, - "enhanced_threat_control_enabled": False, # Optional. Default value - is False. If set to ``true``"" , suspicious requests will go through - additional security checks to help mitigate layer 7 DDoS attacks. - "functions": [ - { - "name": "str", # The name. Must be unique across all - components within the same app. Required. - "alerts": [ + "created_at": "2020-02-20 00:00:00", # Optional. When the event was + created. + "deployment": { + "cause": "str", # Optional. What caused this deployment to + be created. + "cloned_from": "str", # Optional. The ID of a previous + deployment that this deployment was cloned from. + "created_at": "2020-02-20 00:00:00", # Optional. The + creation time of the deployment. + "functions": [ + { + "name": "str", # Optional. The name of this + functions component. + "namespace": "str", # Optional. The + namespace where the functions are deployed. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this + functions component. + } + ], + "id": "str", # Optional. The ID of the deployment. + "jobs": [ + { + "name": "str", # Optional. The name of this + job. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this job. + } + ], + "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". + Known values are: "UNKNOWN", "PENDING_BUILD", "BUILDING", + "PENDING_DEPLOY", "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and + "CANCELED". + "phase_last_updated_at": "2020-02-20 00:00:00", # Optional. + When the deployment phase was last updated. 
+ "progress": { + "error_steps": 0, # Optional. Number of unsuccessful + steps. + "pending_steps": 0, # Optional. Number of pending + steps. + "running_steps": 0, # Optional. Number of currently + running steps. + "steps": [ { - "disabled": bool, # Optional. Is the - alert disabled?. - "operator": "UNSPECIFIED_OPERATOR", - # Optional. Default value is "UNSPECIFIED_OPERATOR". Known - values are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and - "LESS_THAN". - "rule": "UNSPECIFIED_RULE", # - Optional. Default value is "UNSPECIFIED_RULE". Known values - are: "UNSPECIFIED_RULE", "CPU_UTILIZATION", - "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED", - "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", - "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", - "FUNCTIONS_ACTIVATION_COUNT", - "FUNCTIONS_AVERAGE_DURATION_MS", - "FUNCTIONS_ERROR_RATE_PER_MINUTE", - "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", - and "FUNCTIONS_GB_RATE_PER_SECOND". - "value": 0.0, # Optional. Threshold - value for alert. - "window": "UNSPECIFIED_WINDOW" # - Optional. Default value is "UNSPECIFIED_WINDOW". Known values - are: "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", - "THIRTY_MINUTES", and "ONE_HOUR". + "component_name": "str", # Optional. + The component name that this step is associated with. + "ended_at": "2020-02-20 00:00:00", # + Optional. The end time of this step. + "message_base": "str", # Optional. + The base of a human-readable description of the step intended + to be combined with the component name for presentation. For + example: ``message_base`` = "Building service" + ``component_name`` = "api". + "name": "str", # Optional. The name + of this step. + "reason": { + "code": "str", # Optional. + The error code. + "message": "str" # Optional. + The error message. + }, + "started_at": "2020-02-20 00:00:00", + # Optional. The start time of this step. + "status": "UNKNOWN", # Optional. + Default value is "UNKNOWN". 
Known values are: "UNKNOWN", + "PENDING", "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. Child steps + of this step. + ] } ], - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "cors": { - "allow_credentials": bool, # Optional. - Whether browsers should expose the response to the client-side - JavaScript code when the request"u2019s credentials mode is - include. This configures the ``Access-Control-Allow-Credentials`` - header. - "allow_headers": [ - "str" # Optional. The set of allowed - HTTP request headers. This configures the - ``Access-Control-Allow-Headers`` header. - ], - "allow_methods": [ - "str" # Optional. The set of allowed - HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. - ], - "allow_origins": [ - { - "exact": "str", # Optional. - Exact string match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "prefix": "str", # Optional. - Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "regex": "str" # Optional. - RE2 style regex-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. For more - information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. - } - ], - "expose_headers": [ - "str" # Optional. The set of HTTP - response headers that browsers are allowed to access. This - configures the ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. An optional - duration specifying how long browsers can cache the results of a - preflight request. This configures the ``Access-Control-Max-Age`` - header. - }, - "envs": [ + "success_steps": 0, # Optional. Number of successful + steps. 
+ "summary_steps": [ { - "key": "str", # The variable name. - Required. - "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. + "component_name": "str", # Optional. + The component name that this step is associated with. + "ended_at": "2020-02-20 00:00:00", # + Optional. The end time of this step. + "message_base": "str", # Optional. + The base of a human-readable description of the step intended + to be combined with the component name for presentation. For + example: ``message_base`` = "Building service" + ``component_name`` = "api". + "name": "str", # Optional. The name + of this step. + "reason": { + "code": "str", # Optional. + The error code. + "message": "str" # Optional. + The error message. + }, + "started_at": "2020-02-20 00:00:00", + # Optional. The start time of this step. + "status": "UNKNOWN", # Optional. + Default value is "UNKNOWN". Known values are: "UNKNOWN", + "PENDING", "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. Child steps + of this step. + ] } ], - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. 
- "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "log_destinations": [ + "total_steps": 0 # Optional. Total number of steps. + }, + "services": [ + { + "name": "str", # Optional. The name of this + service. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this + service. + } + ], + "spec": { + "name": "str", # The name of the app. Must be unique + across all apps in the same account. Required. + "databases": [ { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # Optional. - Logtail token. - }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": "str", # - Optional. The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. 
Default value is "logs". The index name to use - for the logs. If not set, the default index name is - "logs". - }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } + "name": "str", # The database's + name. The name must be unique across all components within + the same app and cannot use capital letters. Required. + "cluster_name": "str", # Optional. + The name of the underlying DigitalOcean DBaaS cluster. This + is required for production databases. For dev databases, if + cluster_name is not set, a new cluster will be provisioned. + "db_name": "str", # Optional. The + name of the MySQL or PostgreSQL database to configure. + "db_user": "str", # Optional. The + name of the MySQL or PostgreSQL user to configure. + "engine": "UNSET", # Optional. + Default value is "UNSET". * MYSQL: MySQL * PG: PostgreSQL * + REDIS: Caching * MONGODB: MongoDB * KAFKA: Kafka * + OPENSEARCH: OpenSearch * VALKEY: ValKey. Known values are: + "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", "KAFKA", + "OPENSEARCH", and "VALKEY". + "production": bool, # Optional. + Whether this is a production or dev database. + "version": "str" # Optional. The + version of the database engine. } ], - "routes": [ + "disable_edge_cache": False, # Optional. Default + value is False. .. role:: raw-html-m2r(raw) :format: html If set + to ``true``"" , the app will **not** be cached at the edge (CDN). + Enable this option if you want to manage CDN configuration + yourself"u2014whether by using an external CDN provider or by + handling static content and caching within your app. This setting is + also recommended for apps that require real-time data or serve + dynamic content, such as those using Server-Sent Events (SSE) over + GET, or hosting an MCP (Model Context Protocol) Server that utilizes + SSE."" :raw-html-m2r:`
` **Note:** This feature is not available + for static site components."" :raw-html-m2r:`
` For more + information, see `Disable CDN Cache + `_. + "disable_email_obfuscation": False, # Optional. + Default value is False. If set to ``true``"" , email addresses in the + app will not be obfuscated. This is useful for apps that require + email addresses to be visible (in the HTML markup). + "domains": [ { - "path": "str", # Optional. - (Deprecated - Use Ingress Rules instead). An HTTP path - prefix. Paths must start with / and must be unique across all - components within an app. - "preserve_path_prefix": bool # - Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded to - the component. For example, a component with ``path=/api`` - will have requests to ``/api/list`` trimmed to ``/list``. If - this value is ``true``"" , the path will remain - ``/api/list``. + "domain": "str", # The hostname for + the domain. Required. + "minimum_tls_version": "str", # + Optional. The minimum version of TLS a client application can + use to access resources for the domain. Must be one of the + following values wrapped within quotations: ``"1.2"`` or + ``"1.3"``. Known values are: "1.2" and "1.3". + "type": "UNSPECIFIED", # Optional. + Default value is "UNSPECIFIED". * DEFAULT: The default + ``.ondigitalocean.app`` domain assigned to this app * + PRIMARY: The primary domain for this app that is displayed as + the default in the control panel, used in bindable + environment variables, and any other places that reference an + app's live URL. Only one domain may be set as primary. * + ALIAS: A non-primary domain. Known values are: "UNSPECIFIED", + "DEFAULT", "PRIMARY", and "ALIAS". + "wildcard": bool, # Optional. + Indicates whether the domain includes all sub-domains, in + addition to the given domain. + "zone": "str" # Optional. Optional. 
+ If the domain uses DigitalOcean DNS and you would like App + Platform to automatically manage it for you, set this to the + name of the domain on your account. For example, If the + domain you are adding is ``app.domain.com``"" , the zone + could be ``domain.com``. } ], - "source_dir": "str" # Optional. An optional path to - the working directory to use for the build. For Dockerfile builds, - this will be used as the build context. Must be relative to the root - of the repo. - } - ], - "ingress": { - "rules": [ - { - "component": { - "name": "str", # The name of the - component to route to. Required. - "preserve_path_prefix": "str", # - Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded to - the component. For example, a component with ``path=/api`` - will have requests to ``/api/list`` trimmed to ``/list``. If - this value is ``true``"" , the path will remain - ``/api/list``. Note: this is not applicable for Functions - Components and is mutually exclusive with ``rewrite``. - "rewrite": "str" # Optional. An - optional field that will rewrite the path of the component to - be what is specified here. By default, the HTTP request path - will be trimmed from the left when forwarded to the - component. For example, a component with ``path=/api`` will - have requests to ``/api/list`` trimmed to ``/list``. If you - specified the rewrite to be ``/v1/``"" , requests to - ``/api/list`` would be rewritten to ``/v1/list``. Note: this - is mutually exclusive with ``preserve_path_prefix``. - }, - "cors": { - "allow_credentials": bool, # - Optional. Whether browsers should expose the response to the - client-side JavaScript code when the request"u2019s - credentials mode is include. This configures the - ``Access-Control-Allow-Credentials`` header. - "allow_headers": [ - "str" # Optional. The set of - allowed HTTP request headers. 
This configures the - ``Access-Control-Allow-Headers`` header. + "egress": { + "type": "AUTOASSIGN" # Optional. Default + value is "AUTOASSIGN". The app egress type. Known values are: + "AUTOASSIGN" and "DEDICATED_IP". + }, + "enhanced_threat_control_enabled": False, # + Optional. Default value is False. If set to ``true``"" , suspicious + requests will go through additional security checks to help mitigate + layer 7 DDoS attacks. + "functions": [ + { + "name": "str", # The name. Must be + unique across all components within the same app. Required. + "alerts": [ + { + "disabled": bool, # + Optional. Is the alert disabled?. + "operator": + "UNSPECIFIED_OPERATOR", # Optional. Default value is + "UNSPECIFIED_OPERATOR". Known values are: + "UNSPECIFIED_OPERATOR", "GREATER_THAN", and + "LESS_THAN". + "rule": + "UNSPECIFIED_RULE", # Optional. Default value is + "UNSPECIFIED_RULE". Known values are: + "UNSPECIFIED_RULE", "CPU_UTILIZATION", + "MEM_UTILIZATION", "RESTART_COUNT", + "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", + "DOMAIN_FAILED", "DOMAIN_LIVE", "AUTOSCALE_FAILED", + "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT", + "FUNCTIONS_AVERAGE_DURATION_MS", + "FUNCTIONS_ERROR_RATE_PER_MINUTE", + "FUNCTIONS_AVERAGE_WAIT_TIME_MS", + "FUNCTIONS_ERROR_COUNT", and + "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, # + Optional. Threshold value for alert. + "window": + "UNSPECIFIED_WINDOW" # Optional. Default value is + "UNSPECIFIED_WINDOW". Known values are: + "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", + "THIRTY_MINUTES", and "ONE_HOUR". + } ], - "allow_methods": [ - "str" # Optional. The set of - allowed HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. 
Example: + ``digitalocean/sample-golang``. + }, + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to + the client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. + The set of allowed HTTP request headers. This + configures the ``Access-Control-Allow-Headers`` + header. + ], + "allow_methods": [ + "str" # Optional. + The set of allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": + "str", # Optional. Exact string match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "prefix": + "str", # Optional. Prefix-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "regex": + "str" # Optional. RE2 style regex-based match. + Only 1 of ``exact``"" , ``prefix``"" , or + ``regex`` must be set. For more information about + RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. + The set of HTTP response headers that browsers are + allowed to access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. + An optional duration specifying how long browsers can + cache the results of a preflight request. This configures + the ``Access-Control-Max-Age`` header. + }, + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". 
* GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } ], - "allow_origins": [ + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "log_destinations": [ { - "exact": "str", # - Optional. Exact string match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. - "prefix": "str", # - Optional. Prefix-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. - "regex": "str" # - Optional. RE2 style regex-based match. Only 1 of - ``exact``"" , ``prefix``"" , or ``regex`` must be - set. For more information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. 
Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } } ], - "expose_headers": [ - "str" # Optional. The set of - HTTP response headers that browsers are allowed to - access. This configures the - ``Access-Control-Expose-Headers`` header. + "routes": [ + { + "path": "str", # + Optional. (Deprecated - Use Ingress Rules instead). + An HTTP path prefix. Paths must start with / and must + be unique across all components within an app. + "preserve_path_prefix": bool # Optional. An optional + flag to preserve the path that is forwarded to the + backend service. By default, the HTTP request path + will be trimmed from the left when forwarded to the + component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. + } ], - "max_age": "str" # Optional. An - optional duration specifying how long browsers can cache the - results of a preflight request. This configures the - ``Access-Control-Max-Age`` header. - }, - "match": { - "authority": { - "exact": "str" # Required. 
- }, - "path": { - "prefix": "str" # - Prefix-based match. For example, ``/api`` will match - ``/api``"" , ``/api/``"" , and any nested paths such as - ``/api/v1/endpoint``. Required. - } - }, - "redirect": { - "authority": "str", # Optional. The - authority/host to redirect to. This can be a hostname or IP - address. Note: use ``port`` to set the port. - "port": 0, # Optional. The port to - redirect to. - "redirect_code": 0, # Optional. The - redirect code to use. Defaults to ``302``. Supported values - are 300, 301, 302, 303, 304, 307, 308. - "scheme": "str", # Optional. The - scheme to redirect to. Supported values are ``http`` or - ``https``. Default: ``https``. - "uri": "str" # Optional. An optional - URI path to redirect to. Note: if this is specified the whole - URI of the original request will be overwritten to this - value, irrespective of the original request URI being - matched. - } - } - ] - }, - "jobs": [ - { - "autoscaling": { - "max_instance_count": 0, # Optional. The - maximum amount of instances for this component. Must be more than - min_instance_count. - "metrics": { - "cpu": { - "percent": 80 # Optional. - Default value is 80. The average target CPU utilization - for the component. - } - }, - "min_instance_count": 0 # Optional. The - minimum amount of instances for this component. Must be less than - max_instance_count. - }, - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "build_command": "str", # Optional. An optional - build command to run while building this component from source. - "dockerfile_path": "str", # Optional. The path to - the Dockerfile relative to the root of the repo. If set, it will be - used to build this component. 
Otherwise, App Platform will attempt to - build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full list, - please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable name. - Required. - "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. + "source_dir": "str" # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. } ], - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. 
The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "image": { - "deploy_on_push": { - "enabled": bool # Optional. Whether - to automatically deploy new images. Can only be used for - images hosted in DOCR and can only be used with an image tag, - not a specific digest. - }, - "digest": "str", # Optional. The image - digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The registry - name. Must be left empty for the ``DOCR`` registry type. - "registry_credentials": "str", # Optional. - The credentials to be able to pull the image. The value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. * "$username:$access_token" for - registries of type ``DOCKER_HUB``. * "$username:$access_token" - for registries of type ``GHCR``. - "registry_type": "str", # Optional. * - DOCKER_HUB: The DockerHub container registry type. * DOCR: The - DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", "DOCR", - and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default value is - "latest". The repository tag. Defaults to ``latest`` if not - provided and no digest is provided. Cannot be specified if digest - is provided. + "ingress": { + "rules": [ + { + "component": { + "name": "str", # The + name of the component to route to. Required. + "preserve_path_prefix": "str", # Optional. An + optional flag to preserve the path that is forwarded + to the backend service. By default, the HTTP request + path will be trimmed from the left when forwarded to + the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. Note: this is not + applicable for Functions Components and is mutually + exclusive with ``rewrite``. 
+ "rewrite": "str" # + Optional. An optional field that will rewrite the + path of the component to be what is specified here. + By default, the HTTP request path will be trimmed + from the left when forwarded to the component. For + example, a component with ``path=/api`` will have + requests to ``/api/list`` trimmed to ``/list``. If + you specified the rewrite to be ``/v1/``"" , requests + to ``/api/list`` would be rewritten to ``/v1/list``. + Note: this is mutually exclusive with + ``preserve_path_prefix``. + }, + "cors": { + "allow_credentials": + bool, # Optional. Whether browsers should expose the + response to the client-side JavaScript code when the + request"u2019s credentials mode is include. This + configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # + Optional. The set of allowed HTTP request + headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # + Optional. The set of allowed HTTP methods. This + configures the ``Access-Control-Allow-Methods`` + header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. Exact string + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "prefix": "str", # Optional. Prefix-based + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "regex": "str" # Optional. RE2 style + regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For + more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # + Optional. The set of HTTP response headers that + browsers are allowed to access. This configures + the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # + Optional. An optional duration specifying how long + browsers can cache the results of a preflight + request. This configures the + ``Access-Control-Max-Age`` header. 
+ }, + "match": { + "authority": { + "exact": + "str" # Required. + }, + "path": { + "prefix": + "str" # Prefix-based match. For example, + ``/api`` will match ``/api``"" , ``/api/``"" , + and any nested paths such as + ``/api/v1/endpoint``. Required. + } + }, + "redirect": { + "authority": "str", + # Optional. The authority/host to redirect to. This + can be a hostname or IP address. Note: use ``port`` + to set the port. + "port": 0, # + Optional. The port to redirect to. + "redirect_code": 0, + # Optional. The redirect code to use. Defaults to + ``302``. Supported values are 300, 301, 302, 303, + 304, 307, 308. + "scheme": "str", # + Optional. The scheme to redirect to. Supported values + are ``http`` or ``https``. Default: ``https``. + "uri": "str" # + Optional. An optional URI path to redirect to. Note: + if this is specified the whole URI of the original + request will be overwritten to this value, + irrespective of the original request URI being + matched. + } + } + ] }, - "instance_count": 1, # Optional. Default value is 1. - The amount of instances that this component should be scaled to. - Default: 1. Must not be set if autoscaling is used. - "instance_size_slug": {}, - "kind": "UNSPECIFIED", # Optional. Default value is - "UNSPECIFIED". * UNSPECIFIED: Default job type, will auto-complete to - POST_DEPLOY kind. * PRE_DEPLOY: Indicates a job that runs before an - app deployment. * POST_DEPLOY: Indicates a job that runs after an app - deployment. * FAILED_DEPLOY: Indicates a job that runs after a - component fails to deploy. Known values are: "UNSPECIFIED", - "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY". - "log_destinations": [ + "jobs": [ { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. + "autoscaling": { + "max_instance_count": 0, # + Optional. The maximum amount of instances for this + component. Must be more than min_instance_count. 
+ "metrics": { + "cpu": { + "percent": 80 + # Optional. Default value is 80. The average + target CPU utilization for the component. + } + }, + "min_instance_count": 0 # + Optional. The minimum amount of instances for this + component. Must be less than max_instance_count. }, - "logtail": { - "token": "str" # Optional. - Logtail token. + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". 
* GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. }, - "cluster_name": "str", # - Optional. The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. Default value is "logs". The index name to use - for the logs. If not set, the default index name is - "logs". + "digest": "str", # Optional. + The image digest. 
Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * + "$username:$access_token" for registries of type + ``GHCR``. + "registry_type": "str", # + Optional. * DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } - } - ], - "name": "str", # Optional. The name. Must be unique - across all components within the same app. - "run_command": "str", # Optional. An optional run - command to override the component's default. - "source_dir": "str", # Optional. An optional path to - the working directory to use for the build. For Dockerfile builds, - this will be used as the build context. Must be relative to the root - of the repo. - "termination": { - "grace_period_seconds": 0 # Optional. The - number of seconds to wait between sending a TERM signal to a - container and issuing a KILL which causes immediate shutdown. - (Default 120). - } - } - ], - "maintenance": { - "archive": bool, # Optional. Indicates whether the app - should be archived. Setting this to true implies that enabled is set to - true. - "enabled": bool, # Optional. Indicates whether maintenance - mode should be enabled for the app. 
- "offline_page_url": "str" # Optional. A custom offline page - to display when maintenance mode is enabled or the app is archived. - }, - "region": "str", # Optional. The slug form of the geographical - origin of the app. Default: ``nearest available``. Known values are: "atl", - "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", "sgp", and "syd". - "services": [ - { - "autoscaling": { - "max_instance_count": 0, # Optional. The - maximum amount of instances for this component. Must be more than - min_instance_count. - "metrics": { - "cpu": { - "percent": 80 # Optional. - Default value is 80. The average target CPU utilization - for the component. - } - }, - "min_instance_count": 0 # Optional. The - minimum amount of instances for this component. Must be less than - max_instance_count. - }, - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "build_command": "str", # Optional. An optional - build command to run while building this component from source. - "cors": { - "allow_credentials": bool, # Optional. - Whether browsers should expose the response to the client-side - JavaScript code when the request"u2019s credentials mode is - include. This configures the ``Access-Control-Allow-Credentials`` - header. - "allow_headers": [ - "str" # Optional. The set of allowed - HTTP request headers. This configures the - ``Access-Control-Allow-Headers`` header. - ], - "allow_methods": [ - "str" # Optional. The set of allowed - HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. - ], - "allow_origins": [ - { - "exact": "str", # Optional. - Exact string match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "prefix": "str", # Optional. - Prefix-based match. 
Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "regex": "str" # Optional. - RE2 style regex-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. For more - information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. + "instance_count": 1, # Optional. + Default value is 1. The amount of instances that this + component should be scaled to. Default: 1. Must not be set if + autoscaling is used. + "instance_size_slug": {}, + "kind": "UNSPECIFIED", # Optional. + Default value is "UNSPECIFIED". * UNSPECIFIED: Default job + type, will auto-complete to POST_DEPLOY kind. * PRE_DEPLOY: + Indicates a job that runs before an app deployment. * + POST_DEPLOY: Indicates a job that runs after an app + deployment. * FAILED_DEPLOY: Indicates a job that runs after + a component fails to deploy. Known values are: "UNSPECIFIED", + "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY". + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. 
Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. + Must be unique across all components within the same app. + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str", # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + "termination": { + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which + causes immediate shutdown. (Default 120). } - ], - "expose_headers": [ - "str" # Optional. The set of HTTP - response headers that browsers are allowed to access. This - configures the ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. An optional - duration specifying how long browsers can cache the results of a - preflight request. This configures the ``Access-Control-Max-Age`` - header. - }, - "dockerfile_path": "str", # Optional. The path to - the Dockerfile relative to the root of the repo. If set, it will be - used to build this component. Otherwise, App Platform will attempt to - build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full list, - please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable name. - Required. - "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". 
- "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. } ], - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "health_check": { - "failure_threshold": 0, # Optional. The - number of failed health checks before considered unhealthy. - "http_path": "str", # Optional. The route - path used for the HTTP health check ping. If not set, the HTTP - health check will be disabled and a TCP health check used - instead. - "initial_delay_seconds": 0, # Optional. The - number of seconds to wait before beginning health checks. - "period_seconds": 0, # Optional. The number - of seconds to wait between health checks. - "port": 0, # Optional. The port on which the - health check will be performed. If not set, the health check will - be performed on the component's http_port. - "success_threshold": 0, # Optional. The - number of successful health checks before considered healthy. 
- "timeout_seconds": 0 # Optional. The number - of seconds after which the check times out. - }, - "http_port": 0, # Optional. The internal port on - which this service's run command will listen. Default: 8080 If there - is not an environment variable with the name ``PORT``"" , one will be - automatically added with its value set to the value of this field. - "image": { - "deploy_on_push": { - "enabled": bool # Optional. Whether - to automatically deploy new images. Can only be used for - images hosted in DOCR and can only be used with an image tag, - not a specific digest. - }, - "digest": "str", # Optional. The image - digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The registry - name. Must be left empty for the ``DOCR`` registry type. - "registry_credentials": "str", # Optional. - The credentials to be able to pull the image. The value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. * "$username:$access_token" for - registries of type ``DOCKER_HUB``. * "$username:$access_token" - for registries of type ``GHCR``. - "registry_type": "str", # Optional. * - DOCKER_HUB: The DockerHub container registry type. * DOCR: The - DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", "DOCR", - and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default value is - "latest". The repository tag. Defaults to ``latest`` if not - provided and no digest is provided. Cannot be specified if digest - is provided. - }, - "instance_count": 1, # Optional. Default value is 1. - The amount of instances that this component should be scaled to. - Default: 1. Must not be set if autoscaling is used. - "instance_size_slug": {}, - "internal_ports": [ - 0 # Optional. The ports on which this - service will listen for internal traffic. 
- ], - "liveness_health_check": { - "failure_threshold": 0, # Optional. The - number of failed health checks before considered unhealthy. - "http_path": "str", # Optional. The route - path used for the HTTP health check ping. If not set, the HTTP - health check will be disabled and a TCP health check used - instead. - "initial_delay_seconds": 0, # Optional. The - number of seconds to wait before beginning health checks. - "period_seconds": 0, # Optional. The number - of seconds to wait between health checks. - "port": 0, # Optional. The port on which the - health check will be performed. - "success_threshold": 0, # Optional. The - number of successful health checks before considered healthy. - "timeout_seconds": 0 # Optional. The number - of seconds after which the check times out. + "maintenance": { + "archive": bool, # Optional. Indicates + whether the app should be archived. Setting this to true implies + that enabled is set to true. + "enabled": bool, # Optional. Indicates + whether maintenance mode should be enabled for the app. + "offline_page_url": "str" # Optional. A + custom offline page to display when maintenance mode is enabled + or the app is archived. }, - "log_destinations": [ + "region": "str", # Optional. The slug form of the + geographical origin of the app. Default: ``nearest available``. Known + values are: "atl", "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", + "sgp", and "syd". + "services": [ { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. + "autoscaling": { + "max_instance_count": 0, # + Optional. The maximum amount of instances for this + component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 + # Optional. Default value is 80. The average + target CPU utilization for the component. + } + }, + "min_instance_count": 0 # + Optional. The minimum amount of instances for this + component. 
Must be less than max_instance_count. }, - "logtail": { - "token": "str" # Optional. - Logtail token. + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to + the client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. + The set of allowed HTTP request headers. This + configures the ``Access-Control-Allow-Headers`` + header. + ], + "allow_methods": [ + "str" # Optional. + The set of allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": + "str", # Optional. Exact string match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "prefix": + "str", # Optional. Prefix-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "regex": + "str" # Optional. RE2 style regex-based match. + Only 1 of ``exact``"" , ``prefix``"" , or + ``regex`` must be set. For more information about + RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. 
+ The set of HTTP response headers that browsers are + allowed to access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. + An optional duration specifying how long browsers can + cache the results of a preflight request. This configures + the ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. 
+ The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "health_check": { + "failure_threshold": 0, # + Optional. The number of failed health checks before + considered unhealthy. + "http_path": "str", # + Optional. The route path used for the HTTP health check + ping. If not set, the HTTP health check will be disabled + and a TCP health check used instead. + "initial_delay_seconds": 0, + # Optional. The number of seconds to wait before + beginning health checks. + "period_seconds": 0, # + Optional. The number of seconds to wait between health + checks. + "port": 0, # Optional. The + port on which the health check will be performed. If not + set, the health check will be performed on the + component's http_port. + "success_threshold": 0, # + Optional. The number of successful health checks before + considered healthy. + "timeout_seconds": 0 # + Optional. The number of seconds after which the check + times out. + }, + "http_port": 0, # Optional. The + internal port on which this service's run command will + listen. Default: 8080 If there is not an environment variable + with the name ``PORT``"" , one will be automatically added + with its value set to the value of this field. + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. }, - "cluster_name": "str", # - Optional. The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. 
OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. Default value is "logs". The index name to use - for the logs. If not set, the default index name is - "logs". + "digest": "str", # Optional. + The image digest. Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * + "$username:$access_token" for registries of type + ``GHCR``. + "registry_type": "str", # + Optional. * DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. + "instance_count": 1, # Optional. + Default value is 1. The amount of instances that this + component should be scaled to. Default: 1. Must not be set if + autoscaling is used. + "instance_size_slug": {}, + "internal_ports": [ + 0 # Optional. The ports on + which this service will listen for internal traffic. + ], + "liveness_health_check": { + "failure_threshold": 0, # + Optional. The number of failed health checks before + considered unhealthy. + "http_path": "str", # + Optional. The route path used for the HTTP health check + ping. 
If not set, the HTTP health check will be disabled + and a TCP health check used instead. + "initial_delay_seconds": 0, + # Optional. The number of seconds to wait before + beginning health checks. + "period_seconds": 0, # + Optional. The number of seconds to wait between health + checks. + "port": 0, # Optional. The + port on which the health check will be performed. + "success_threshold": 0, # + Optional. The number of successful health checks before + considered healthy. + "timeout_seconds": 0 # + Optional. The number of seconds after which the check + times out. + }, + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. + Must be unique across all components within the same app. + "protocol": "str", # Optional. 
The + protocol which the service uses to serve traffic on the + http_port. * ``HTTP``"" : The app is serving the HTTP + protocol. Default. * ``HTTP2``"" : The app is serving the + HTTP/2 protocol. Currently, this needs to be implemented in + the service by serving HTTP/2 cleartext (h2c). Known values + are: "HTTP" and "HTTP2". + "routes": [ + { + "path": "str", # + Optional. (Deprecated - Use Ingress Rules instead). + An HTTP path prefix. Paths must start with / and must + be unique across all components within an app. + "preserve_path_prefix": bool # Optional. An optional + flag to preserve the path that is forwarded to the + backend service. By default, the HTTP request path + will be trimmed from the left when forwarded to the + component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. + } + ], + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str", # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + "termination": { + "drain_seconds": 0, # + Optional. The number of seconds to wait between selecting + a container instance for termination and issuing the TERM + signal. Selecting a container instance for termination + begins an asynchronous drain of new requests on upstream + load-balancers. (Default 15). + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which + causes immediate shutdown. (Default 120). } } ], - "name": "str", # Optional. The name. Must be unique - across all components within the same app. - "protocol": "str", # Optional. The protocol which - the service uses to serve traffic on the http_port. 
* ``HTTP``"" : - The app is serving the HTTP protocol. Default. * ``HTTP2``"" : The - app is serving the HTTP/2 protocol. Currently, this needs to be - implemented in the service by serving HTTP/2 cleartext (h2c). Known - values are: "HTTP" and "HTTP2". - "routes": [ + "static_sites": [ { - "path": "str", # Optional. - (Deprecated - Use Ingress Rules instead). An HTTP path - prefix. Paths must start with / and must be unique across all - components within an app. - "preserve_path_prefix": bool # - Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded to - the component. For example, a component with ``path=/api`` - will have requests to ``/api/list`` trimmed to ``/list``. If - this value is ``true``"" , the path will remain - ``/api/list``. + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. + "catchall_document": "str", # + Optional. The name of the document to use as the fallback for + any requests to documents that are not found when serving + this static site. Only 1 of ``catchall_document`` or + ``error_document`` can be set. + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to + the client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. + The set of allowed HTTP request headers. This + configures the ``Access-Control-Allow-Headers`` + header. + ], + "allow_methods": [ + "str" # Optional. 
+ The set of allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": + "str", # Optional. Exact string match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "prefix": + "str", # Optional. Prefix-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "regex": + "str" # Optional. RE2 style regex-based match. + Only 1 of ``exact``"" , ``prefix``"" , or + ``regex`` must be set. For more information about + RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. + The set of HTTP response headers that browsers are + allowed to access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. + An optional duration specifying how long browsers can + cache the results of a preflight request. This configures + the ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". 
+ "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "error_document": "404.html", # + Optional. Default value is "404.html". The name of the error + document to use when serving this static site. Default: + 404.html. If no such file exists within the built assets, App + Platform will supply one. + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. + }, + "digest": "str", # Optional. + The image digest. Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. 
* + "$username:$access_token" for registries of type + ``GHCR``. + "registry_type": "str", # + Optional. * DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. + }, + "index_document": "index.html", # + Optional. Default value is "index.html". The name of the + index document to use when serving this static site. Default: + index.html. + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. 
+ } + } + ], + "name": "str", # Optional. The name. + Must be unique across all components within the same app. + "output_dir": "str", # Optional. An + optional path to where the built assets will be located, + relative to the build context. If not set, App Platform will + automatically scan for these directory names: ``_static``"" , + ``dist``"" , ``public``"" , ``build``. + "routes": [ + { + "path": "str", # + Optional. (Deprecated - Use Ingress Rules instead). + An HTTP path prefix. Paths must start with / and must + be unique across all components within an app. + "preserve_path_prefix": bool # Optional. An optional + flag to preserve the path that is forwarded to the + backend service. By default, the HTTP request path + will be trimmed from the left when forwarded to the + component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. + } + ], + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str" # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. } ], - "run_command": "str", # Optional. An optional run - command to override the component's default. - "source_dir": "str", # Optional. An optional path to - the working directory to use for the build. For Dockerfile builds, - this will be used as the build context. Must be relative to the root - of the repo. - "termination": { - "drain_seconds": 0, # Optional. The number - of seconds to wait between selecting a container instance for - termination and issuing the TERM signal. Selecting a container - instance for termination begins an asynchronous drain of new - requests on upstream load-balancers. (Default 15). - "grace_period_seconds": 0 # Optional. 
The - number of seconds to wait between sending a TERM signal to a - container and issuing a KILL which causes immediate shutdown. - (Default 120). - } - } - ], - "static_sites": [ - { - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "build_command": "str", # Optional. An optional - build command to run while building this component from source. - "catchall_document": "str", # Optional. The name of - the document to use as the fallback for any requests to documents - that are not found when serving this static site. Only 1 of - ``catchall_document`` or ``error_document`` can be set. - "cors": { - "allow_credentials": bool, # Optional. - Whether browsers should expose the response to the client-side - JavaScript code when the request"u2019s credentials mode is - include. This configures the ``Access-Control-Allow-Credentials`` - header. - "allow_headers": [ - "str" # Optional. The set of allowed - HTTP request headers. This configures the - ``Access-Control-Allow-Headers`` header. - ], - "allow_methods": [ - "str" # Optional. The set of allowed - HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. - ], - "allow_origins": [ + "vpc": { + "egress_ips": [ { - "exact": "str", # Optional. - Exact string match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "prefix": "str", # Optional. - Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "regex": "str" # Optional. - RE2 style regex-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. For more - information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. + "ip": "str" # Optional. The + egress ips associated with the VPC. 
} ], - "expose_headers": [ - "str" # Optional. The set of HTTP - response headers that browsers are allowed to access. This - configures the ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. An optional - duration specifying how long browsers can cache the results of a - preflight request. This configures the ``Access-Control-Max-Age`` - header. - }, - "dockerfile_path": "str", # Optional. The path to - the Dockerfile relative to the root of the repo. If set, it will be - used to build this component. Otherwise, App Platform will attempt to - build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full list, - please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable name. - Required. - "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. - } - ], - "error_document": "404.html", # Optional. Default - value is "404.html". The name of the error document to use when - serving this static site. Default: 404.html. If no such file exists - within the built assets, App Platform will supply one. - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. 
Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "image": { - "deploy_on_push": { - "enabled": bool # Optional. Whether - to automatically deploy new images. Can only be used for - images hosted in DOCR and can only be used with an image tag, - not a specific digest. - }, - "digest": "str", # Optional. The image - digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The registry - name. Must be left empty for the ``DOCR`` registry type. - "registry_credentials": "str", # Optional. - The credentials to be able to pull the image. The value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. * "$username:$access_token" for - registries of type ``DOCKER_HUB``. * "$username:$access_token" - for registries of type ``GHCR``. - "registry_type": "str", # Optional. * - DOCKER_HUB: The DockerHub container registry type. * DOCR: The - DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", "DOCR", - and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default value is - "latest". The repository tag. Defaults to ``latest`` if not - provided and no digest is provided. Cannot be specified if digest - is provided. + "id": "str" # Optional. The ID of the VPC. 
}, - "index_document": "index.html", # Optional. Default - value is "index.html". The name of the index document to use when - serving this static site. Default: index.html. - "log_destinations": [ + "workers": [ { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. + "autoscaling": { + "max_instance_count": 0, # + Optional. The maximum amount of instances for this + component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 + # Optional. Default value is 80. The average + target CPU utilization for the component. + } + }, + "min_instance_count": 0 # + Optional. The minimum amount of instances for this + component. Must be less than max_instance_count. }, - "logtail": { - "token": "str" # Optional. - Logtail token. + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": "str", # - Optional. The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. Default value is "logs". 
The index name to use - for the logs. If not set, the default index name is - "logs". + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } - } - ], - "name": "str", # Optional. The name. Must be unique - across all components within the same app. - "output_dir": "str", # Optional. An optional path to - where the built assets will be located, relative to the build - context. 
If not set, App Platform will automatically scan for these - directory names: ``_static``"" , ``dist``"" , ``public``"" , - ``build``. - "routes": [ - { - "path": "str", # Optional. - (Deprecated - Use Ingress Rules instead). An HTTP path - prefix. Paths must start with / and must be unique across all - components within an app. - "preserve_path_prefix": bool # - Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded to - the component. For example, a component with ``path=/api`` - will have requests to ``/api/list`` trimmed to ``/list``. If - this value is ``true``"" , the path will remain - ``/api/list``. - } - ], - "run_command": "str", # Optional. An optional run - command to override the component's default. - "source_dir": "str" # Optional. An optional path to - the working directory to use for the build. For Dockerfile builds, - this will be used as the build context. Must be relative to the root - of the repo. - } - ], - "vpc": { - "egress_ips": [ - { - "ip": "str" # Optional. The egress ips - associated with the VPC. - } - ], - "id": "str" # Optional. The ID of the VPC. - }, - "workers": [ - { - "autoscaling": { - "max_instance_count": 0, # Optional. The - maximum amount of instances for this component. Must be more than - min_instance_count. - "metrics": { - "cpu": { - "percent": 80 # Optional. - Default value is 80. The average target CPU utilization - for the component. - } - }, - "min_instance_count": 0 # Optional. The - minimum amount of instances for this component. Must be less than - max_instance_count. - }, - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. 
- }, - "build_command": "str", # Optional. An optional - build command to run while building this component from source. - "dockerfile_path": "str", # Optional. The path to - the Dockerfile relative to the root of the repo. If set, it will be - used to build this component. Otherwise, App Platform will attempt to - build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full list, - please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable name. - Required. - "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. - } - ], - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. 
Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "image": { - "deploy_on_push": { - "enabled": bool # Optional. Whether - to automatically deploy new images. Can only be used for - images hosted in DOCR and can only be used with an image tag, - not a specific digest. - }, - "digest": "str", # Optional. The image - digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The registry - name. Must be left empty for the ``DOCR`` registry type. - "registry_credentials": "str", # Optional. - The credentials to be able to pull the image. The value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. * "$username:$access_token" for - registries of type ``DOCKER_HUB``. * "$username:$access_token" - for registries of type ``GHCR``. - "registry_type": "str", # Optional. * - DOCKER_HUB: The DockerHub container registry type. * DOCR: The - DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", "DOCR", - and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default value is - "latest". The repository tag. Defaults to ``latest`` if not - provided and no digest is provided. Cannot be specified if digest - is provided. - }, - "instance_count": 1, # Optional. Default value is 1. - The amount of instances that this component should be scaled to. - Default: 1. Must not be set if autoscaling is used. - "instance_size_slug": {}, - "liveness_health_check": { - "failure_threshold": 0, # Optional. The - number of failed health checks before considered unhealthy. - "http_path": "str", # Optional. The route - path used for the HTTP health check ping. If not set, the HTTP - health check will be disabled and a TCP health check used - instead. 
- "initial_delay_seconds": 0, # Optional. The - number of seconds to wait before beginning health checks. - "period_seconds": 0, # Optional. The number - of seconds to wait between health checks. - "port": 0, # Optional. The port on which the - health check will be performed. - "success_threshold": 0, # Optional. The - number of successful health checks before considered healthy. - "timeout_seconds": 0 # Optional. The number - of seconds after which the check times out. - }, - "log_destinations": [ - { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. }, - "logtail": { - "token": "str" # Optional. - Logtail token. + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. }, - "cluster_name": "str", # - Optional. 
The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. Default value is "logs". The index name to use - for the logs. If not set, the default index name is - "logs". + "digest": "str", # Optional. + The image digest. Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * + "$username:$access_token" for registries of type + ``GHCR``. + "registry_type": "str", # + Optional. * DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. + "instance_count": 1, # Optional. + Default value is 1. The amount of instances that this + component should be scaled to. Default: 1. Must not be set if + autoscaling is used. + "instance_size_slug": {}, + "liveness_health_check": { + "failure_threshold": 0, # + Optional. The number of failed health checks before + considered unhealthy. + "http_path": "str", # + Optional. 
The route path used for the HTTP health check + ping. If not set, the HTTP health check will be disabled + and a TCP health check used instead. + "initial_delay_seconds": 0, + # Optional. The number of seconds to wait before + beginning health checks. + "period_seconds": 0, # + Optional. The number of seconds to wait between health + checks. + "port": 0, # Optional. The + port on which the health check will be performed. + "success_threshold": 0, # + Optional. The number of successful health checks before + considered healthy. + "timeout_seconds": 0 # + Optional. The number of seconds after which the check + times out. + }, + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. + Must be unique across all components within the same app. 
+ "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str", # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + "termination": { + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which + causes immediate shutdown. (Default 120). } } - ], - "name": "str", # Optional. The name. Must be unique - across all components within the same app. - "run_command": "str", # Optional. An optional run - command to override the component's default. - "source_dir": "str", # Optional. An optional path to - the working directory to use for the build. For Dockerfile builds, - this will be used as the build context. Must be relative to the root - of the repo. - "termination": { - "grace_period_seconds": 0 # Optional. The - number of seconds to wait between sending a TERM signal to a - container and issuing a KILL which causes immediate shutdown. - (Default 120). + ] + }, + "static_sites": [ + { + "name": "str", # Optional. The name of this + static site. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this static + site. } - } - ] + ], + "tier_slug": "str", # Optional. The current pricing tier + slug of the deployment. + "updated_at": "2020-02-20 00:00:00", # Optional. When the + deployment was last updated. + "workers": [ + { + "name": "str", # Optional. The name of this + worker. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this worker. + } + ] + }, + "deployment_id": "str", # Optional. For deployment events, this is + the same as the deployment's ID. For autoscaling events, this is the + deployment that was autoscaled. + "id": "str", # Optional. The ID of the event (UUID). 
+ "type": "str" # Optional. The type of event. Known values are: + "UNKNOWN", "DEPLOYMENT", and "AUTOSCALING". } } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @overload - def validate_app_spec( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_apps_get_event_request( + app_id=app_id, + event_id=event_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise 
HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def cancel_event(self, app_id: str, event_id: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Propose an App Spec. + """Cancel an Event. - To propose and validate a spec for a new or existing app, send a POST request to the - ``/v2/apps/propose`` endpoint. The request returns some information about the proposed app, - including app cost and upgrade cost. If an existing app ID is specified, the app spec is - treated as a proposed update to the existing app. + Cancel an in-progress autoscaling event. - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str + :param app_id: The app ID. Required. 
+ :type app_id: str + :param event_id: The event ID. Required. + :type event_id: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -102905,1307 +104260,1706 @@ def validate_app_spec( # response body for status code(s): 200 response == { - "app_cost": 0, # Optional. The monthly cost of the proposed app in USD. - "app_is_static": bool, # Optional. Indicates whether the app is a static - app. - "app_name_available": bool, # Optional. Indicates whether the app name is - available. - "app_name_suggestion": "str", # Optional. The suggested name if the proposed - app name is unavailable. - "app_tier_downgrade_cost": 0, # Optional. The monthly cost of the proposed - app in USD using the previous pricing plan tier. For example, if you propose an - app that uses the Professional tier, the ``app_tier_downgrade_cost`` field - displays the monthly cost of the app if it were to use the Basic tier. If the - proposed app already uses the lest expensive tier, the field is empty. - "existing_static_apps": "str", # Optional. The maximum number of free static - apps the account can have. We will charge you for any additional static apps. - "spec": { - "name": "str", # The name of the app. Must be unique across all apps - in the same account. Required. - "databases": [ - { - "name": "str", # The database's name. The name must - be unique across all components within the same app and cannot use - capital letters. Required. - "cluster_name": "str", # Optional. The name of the - underlying DigitalOcean DBaaS cluster. This is required for - production databases. For dev databases, if cluster_name is not set, - a new cluster will be provisioned. - "db_name": "str", # Optional. The name of the MySQL - or PostgreSQL database to configure. - "db_user": "str", # Optional. The name of the MySQL - or PostgreSQL user to configure. - "engine": "UNSET", # Optional. Default value is - "UNSET". 
* MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * MONGODB: - MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey. - Known values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", - "KAFKA", "OPENSEARCH", and "VALKEY". - "production": bool, # Optional. Whether this is a - production or dev database. - "version": "str" # Optional. The version of the - database engine. - } - ], - "disable_edge_cache": False, # Optional. Default value is False. .. - role:: raw-html-m2r(raw) :format: html If set to ``true``"" , the app - will **not** be cached at the edge (CDN). Enable this option if you want to - manage CDN configuration yourself"u2014whether by using an external CDN - provider or by handling static content and caching within your app. This - setting is also recommended for apps that require real-time data or serve - dynamic content, such as those using Server-Sent Events (SSE) over GET, or - hosting an MCP (Model Context Protocol) Server that utilizes SSE."" - :raw-html-m2r:`
` **Note:** This feature is not available for static site - components."" :raw-html-m2r:`
` For more information, see `Disable CDN - Cache - `_. - "disable_email_obfuscation": False, # Optional. Default value is - False. If set to ``true``"" , email addresses in the app will not be - obfuscated. This is useful for apps that require email addresses to be - visible (in the HTML markup). - "domains": [ - { - "domain": "str", # The hostname for the domain. - Required. - "minimum_tls_version": "str", # Optional. The - minimum version of TLS a client application can use to access - resources for the domain. Must be one of the following values - wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values are: - "1.2" and "1.3". - "type": "UNSPECIFIED", # Optional. Default value is - "UNSPECIFIED". * DEFAULT: The default ``.ondigitalocean.app`` domain - assigned to this app * PRIMARY: The primary domain for this app that - is displayed as the default in the control panel, used in bindable - environment variables, and any other places that reference an app's - live URL. Only one domain may be set as primary. * ALIAS: A - non-primary domain. Known values are: "UNSPECIFIED", "DEFAULT", - "PRIMARY", and "ALIAS". - "wildcard": bool, # Optional. Indicates whether the - domain includes all sub-domains, in addition to the given domain. - "zone": "str" # Optional. Optional. If the domain - uses DigitalOcean DNS and you would like App Platform to - automatically manage it for you, set this to the name of the domain - on your account. For example, If the domain you are adding is - ``app.domain.com``"" , the zone could be ``domain.com``. - } - ], - "egress": { - "type": "AUTOASSIGN" # Optional. Default value is - "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and - "DEDICATED_IP". + "event": { + "autoscaling": { + "components": { + "str": { + "from": 0, # Optional. The number of + replicas before scaling. + "to": 0, # Optional. The number of replicas + after scaling. + "triggering_metric": "str" # Optional. 
The + metric that triggered the scale change. Known values are "cpu", + "requests_per_second", "request_duration". For inactivity sleep, + "scale_from_zero" and "scale_to_zero" are used. + } + }, + "phase": "str" # Optional. The current phase of the + autoscaling event. Known values are: "UNKNOWN", "PENDING", "IN_PROGRESS", + "SUCCEEDED", "FAILED", and "CANCELED". }, - "enhanced_threat_control_enabled": False, # Optional. Default value - is False. If set to ``true``"" , suspicious requests will go through - additional security checks to help mitigate layer 7 DDoS attacks. - "functions": [ - { - "name": "str", # The name. Must be unique across all - components within the same app. Required. - "alerts": [ + "created_at": "2020-02-20 00:00:00", # Optional. When the event was + created. + "deployment": { + "cause": "str", # Optional. What caused this deployment to + be created. + "cloned_from": "str", # Optional. The ID of a previous + deployment that this deployment was cloned from. + "created_at": "2020-02-20 00:00:00", # Optional. The + creation time of the deployment. + "functions": [ + { + "name": "str", # Optional. The name of this + functions component. + "namespace": "str", # Optional. The + namespace where the functions are deployed. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this + functions component. + } + ], + "id": "str", # Optional. The ID of the deployment. + "jobs": [ + { + "name": "str", # Optional. The name of this + job. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this job. + } + ], + "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". + Known values are: "UNKNOWN", "PENDING_BUILD", "BUILDING", + "PENDING_DEPLOY", "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and + "CANCELED". + "phase_last_updated_at": "2020-02-20 00:00:00", # Optional. + When the deployment phase was last updated. 
+ "progress": { + "error_steps": 0, # Optional. Number of unsuccessful + steps. + "pending_steps": 0, # Optional. Number of pending + steps. + "running_steps": 0, # Optional. Number of currently + running steps. + "steps": [ { - "disabled": bool, # Optional. Is the - alert disabled?. - "operator": "UNSPECIFIED_OPERATOR", - # Optional. Default value is "UNSPECIFIED_OPERATOR". Known - values are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and - "LESS_THAN". - "rule": "UNSPECIFIED_RULE", # - Optional. Default value is "UNSPECIFIED_RULE". Known values - are: "UNSPECIFIED_RULE", "CPU_UTILIZATION", - "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED", - "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", - "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", - "FUNCTIONS_ACTIVATION_COUNT", - "FUNCTIONS_AVERAGE_DURATION_MS", - "FUNCTIONS_ERROR_RATE_PER_MINUTE", - "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", - and "FUNCTIONS_GB_RATE_PER_SECOND". - "value": 0.0, # Optional. Threshold - value for alert. - "window": "UNSPECIFIED_WINDOW" # - Optional. Default value is "UNSPECIFIED_WINDOW". Known values - are: "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", - "THIRTY_MINUTES", and "ONE_HOUR". + "component_name": "str", # Optional. + The component name that this step is associated with. + "ended_at": "2020-02-20 00:00:00", # + Optional. The end time of this step. + "message_base": "str", # Optional. + The base of a human-readable description of the step intended + to be combined with the component name for presentation. For + example: ``message_base`` = "Building service" + ``component_name`` = "api". + "name": "str", # Optional. The name + of this step. + "reason": { + "code": "str", # Optional. + The error code. + "message": "str" # Optional. + The error message. + }, + "started_at": "2020-02-20 00:00:00", + # Optional. The start time of this step. + "status": "UNKNOWN", # Optional. + Default value is "UNKNOWN". 
Known values are: "UNKNOWN", + "PENDING", "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. Child steps + of this step. + ] } ], - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "cors": { - "allow_credentials": bool, # Optional. - Whether browsers should expose the response to the client-side - JavaScript code when the request"u2019s credentials mode is - include. This configures the ``Access-Control-Allow-Credentials`` - header. - "allow_headers": [ - "str" # Optional. The set of allowed - HTTP request headers. This configures the - ``Access-Control-Allow-Headers`` header. - ], - "allow_methods": [ - "str" # Optional. The set of allowed - HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. - ], - "allow_origins": [ - { - "exact": "str", # Optional. - Exact string match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "prefix": "str", # Optional. - Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "regex": "str" # Optional. - RE2 style regex-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. For more - information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. - } - ], - "expose_headers": [ - "str" # Optional. The set of HTTP - response headers that browsers are allowed to access. This - configures the ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. An optional - duration specifying how long browsers can cache the results of a - preflight request. This configures the ``Access-Control-Max-Age`` - header. - }, - "envs": [ + "success_steps": 0, # Optional. Number of successful + steps. 
+ "summary_steps": [ { - "key": "str", # The variable name. - Required. - "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. + "component_name": "str", # Optional. + The component name that this step is associated with. + "ended_at": "2020-02-20 00:00:00", # + Optional. The end time of this step. + "message_base": "str", # Optional. + The base of a human-readable description of the step intended + to be combined with the component name for presentation. For + example: ``message_base`` = "Building service" + ``component_name`` = "api". + "name": "str", # Optional. The name + of this step. + "reason": { + "code": "str", # Optional. + The error code. + "message": "str" # Optional. + The error message. + }, + "started_at": "2020-02-20 00:00:00", + # Optional. The start time of this step. + "status": "UNKNOWN", # Optional. + Default value is "UNKNOWN". Known values are: "UNKNOWN", + "PENDING", "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. Child steps + of this step. + ] } ], - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. 
- "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "log_destinations": [ + "total_steps": 0 # Optional. Total number of steps. + }, + "services": [ + { + "name": "str", # Optional. The name of this + service. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this + service. + } + ], + "spec": { + "name": "str", # The name of the app. Must be unique + across all apps in the same account. Required. + "databases": [ { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # Optional. - Logtail token. - }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": "str", # - Optional. The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. 
Default value is "logs". The index name to use - for the logs. If not set, the default index name is - "logs". - }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } + "name": "str", # The database's + name. The name must be unique across all components within + the same app and cannot use capital letters. Required. + "cluster_name": "str", # Optional. + The name of the underlying DigitalOcean DBaaS cluster. This + is required for production databases. For dev databases, if + cluster_name is not set, a new cluster will be provisioned. + "db_name": "str", # Optional. The + name of the MySQL or PostgreSQL database to configure. + "db_user": "str", # Optional. The + name of the MySQL or PostgreSQL user to configure. + "engine": "UNSET", # Optional. + Default value is "UNSET". * MYSQL: MySQL * PG: PostgreSQL * + REDIS: Caching * MONGODB: MongoDB * KAFKA: Kafka * + OPENSEARCH: OpenSearch * VALKEY: ValKey. Known values are: + "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", "KAFKA", + "OPENSEARCH", and "VALKEY". + "production": bool, # Optional. + Whether this is a production or dev database. + "version": "str" # Optional. The + version of the database engine. } ], - "routes": [ + "disable_edge_cache": False, # Optional. Default + value is False. .. role:: raw-html-m2r(raw) :format: html If set + to ``true``"" , the app will **not** be cached at the edge (CDN). + Enable this option if you want to manage CDN configuration + yourself"u2014whether by using an external CDN provider or by + handling static content and caching within your app. This setting is + also recommended for apps that require real-time data or serve + dynamic content, such as those using Server-Sent Events (SSE) over + GET, or hosting an MCP (Model Context Protocol) Server that utilizes + SSE."" :raw-html-m2r:`
` **Note:** This feature is not available + for static site components."" :raw-html-m2r:`
` For more + information, see `Disable CDN Cache + `_. + "disable_email_obfuscation": False, # Optional. + Default value is False. If set to ``true``"" , email addresses in the + app will not be obfuscated. This is useful for apps that require + email addresses to be visible (in the HTML markup). + "domains": [ { - "path": "str", # Optional. - (Deprecated - Use Ingress Rules instead). An HTTP path - prefix. Paths must start with / and must be unique across all - components within an app. - "preserve_path_prefix": bool # - Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded to - the component. For example, a component with ``path=/api`` - will have requests to ``/api/list`` trimmed to ``/list``. If - this value is ``true``"" , the path will remain - ``/api/list``. + "domain": "str", # The hostname for + the domain. Required. + "minimum_tls_version": "str", # + Optional. The minimum version of TLS a client application can + use to access resources for the domain. Must be one of the + following values wrapped within quotations: ``"1.2"`` or + ``"1.3"``. Known values are: "1.2" and "1.3". + "type": "UNSPECIFIED", # Optional. + Default value is "UNSPECIFIED". * DEFAULT: The default + ``.ondigitalocean.app`` domain assigned to this app * + PRIMARY: The primary domain for this app that is displayed as + the default in the control panel, used in bindable + environment variables, and any other places that reference an + app's live URL. Only one domain may be set as primary. * + ALIAS: A non-primary domain. Known values are: "UNSPECIFIED", + "DEFAULT", "PRIMARY", and "ALIAS". + "wildcard": bool, # Optional. + Indicates whether the domain includes all sub-domains, in + addition to the given domain. + "zone": "str" # Optional. Optional. 
+ If the domain uses DigitalOcean DNS and you would like App + Platform to automatically manage it for you, set this to the + name of the domain on your account. For example, If the + domain you are adding is ``app.domain.com``"" , the zone + could be ``domain.com``. } ], - "source_dir": "str" # Optional. An optional path to - the working directory to use for the build. For Dockerfile builds, - this will be used as the build context. Must be relative to the root - of the repo. - } - ], - "ingress": { - "rules": [ - { - "component": { - "name": "str", # The name of the - component to route to. Required. - "preserve_path_prefix": "str", # - Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded to - the component. For example, a component with ``path=/api`` - will have requests to ``/api/list`` trimmed to ``/list``. If - this value is ``true``"" , the path will remain - ``/api/list``. Note: this is not applicable for Functions - Components and is mutually exclusive with ``rewrite``. - "rewrite": "str" # Optional. An - optional field that will rewrite the path of the component to - be what is specified here. By default, the HTTP request path - will be trimmed from the left when forwarded to the - component. For example, a component with ``path=/api`` will - have requests to ``/api/list`` trimmed to ``/list``. If you - specified the rewrite to be ``/v1/``"" , requests to - ``/api/list`` would be rewritten to ``/v1/list``. Note: this - is mutually exclusive with ``preserve_path_prefix``. - }, - "cors": { - "allow_credentials": bool, # - Optional. Whether browsers should expose the response to the - client-side JavaScript code when the request"u2019s - credentials mode is include. This configures the - ``Access-Control-Allow-Credentials`` header. - "allow_headers": [ - "str" # Optional. The set of - allowed HTTP request headers. 
This configures the - ``Access-Control-Allow-Headers`` header. + "egress": { + "type": "AUTOASSIGN" # Optional. Default + value is "AUTOASSIGN". The app egress type. Known values are: + "AUTOASSIGN" and "DEDICATED_IP". + }, + "enhanced_threat_control_enabled": False, # + Optional. Default value is False. If set to ``true``"" , suspicious + requests will go through additional security checks to help mitigate + layer 7 DDoS attacks. + "functions": [ + { + "name": "str", # The name. Must be + unique across all components within the same app. Required. + "alerts": [ + { + "disabled": bool, # + Optional. Is the alert disabled?. + "operator": + "UNSPECIFIED_OPERATOR", # Optional. Default value is + "UNSPECIFIED_OPERATOR". Known values are: + "UNSPECIFIED_OPERATOR", "GREATER_THAN", and + "LESS_THAN". + "rule": + "UNSPECIFIED_RULE", # Optional. Default value is + "UNSPECIFIED_RULE". Known values are: + "UNSPECIFIED_RULE", "CPU_UTILIZATION", + "MEM_UTILIZATION", "RESTART_COUNT", + "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", + "DOMAIN_FAILED", "DOMAIN_LIVE", "AUTOSCALE_FAILED", + "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT", + "FUNCTIONS_AVERAGE_DURATION_MS", + "FUNCTIONS_ERROR_RATE_PER_MINUTE", + "FUNCTIONS_AVERAGE_WAIT_TIME_MS", + "FUNCTIONS_ERROR_COUNT", and + "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, # + Optional. Threshold value for alert. + "window": + "UNSPECIFIED_WINDOW" # Optional. Default value is + "UNSPECIFIED_WINDOW". Known values are: + "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", + "THIRTY_MINUTES", and "ONE_HOUR". + } ], - "allow_methods": [ - "str" # Optional. The set of - allowed HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. 
Example: + ``digitalocean/sample-golang``. + }, + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to + the client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. + The set of allowed HTTP request headers. This + configures the ``Access-Control-Allow-Headers`` + header. + ], + "allow_methods": [ + "str" # Optional. + The set of allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": + "str", # Optional. Exact string match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "prefix": + "str", # Optional. Prefix-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "regex": + "str" # Optional. RE2 style regex-based match. + Only 1 of ``exact``"" , ``prefix``"" , or + ``regex`` must be set. For more information about + RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. + The set of HTTP response headers that browsers are + allowed to access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. + An optional duration specifying how long browsers can + cache the results of a preflight request. This configures + the ``Access-Control-Max-Age`` header. + }, + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". 
* GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } ], - "allow_origins": [ + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "log_destinations": [ { - "exact": "str", # - Optional. Exact string match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. - "prefix": "str", # - Optional. Prefix-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. - "regex": "str" # - Optional. RE2 style regex-based match. Only 1 of - ``exact``"" , ``prefix``"" , or ``regex`` must be - set. For more information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. 
Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } } ], - "expose_headers": [ - "str" # Optional. The set of - HTTP response headers that browsers are allowed to - access. This configures the - ``Access-Control-Expose-Headers`` header. + "routes": [ + { + "path": "str", # + Optional. (Deprecated - Use Ingress Rules instead). + An HTTP path prefix. Paths must start with / and must + be unique across all components within an app. + "preserve_path_prefix": bool # Optional. An optional + flag to preserve the path that is forwarded to the + backend service. By default, the HTTP request path + will be trimmed from the left when forwarded to the + component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. + } ], - "max_age": "str" # Optional. An - optional duration specifying how long browsers can cache the - results of a preflight request. This configures the - ``Access-Control-Max-Age`` header. - }, - "match": { - "authority": { - "exact": "str" # Required. 
- }, - "path": { - "prefix": "str" # - Prefix-based match. For example, ``/api`` will match - ``/api``"" , ``/api/``"" , and any nested paths such as - ``/api/v1/endpoint``. Required. - } - }, - "redirect": { - "authority": "str", # Optional. The - authority/host to redirect to. This can be a hostname or IP - address. Note: use ``port`` to set the port. - "port": 0, # Optional. The port to - redirect to. - "redirect_code": 0, # Optional. The - redirect code to use. Defaults to ``302``. Supported values - are 300, 301, 302, 303, 304, 307, 308. - "scheme": "str", # Optional. The - scheme to redirect to. Supported values are ``http`` or - ``https``. Default: ``https``. - "uri": "str" # Optional. An optional - URI path to redirect to. Note: if this is specified the whole - URI of the original request will be overwritten to this - value, irrespective of the original request URI being - matched. - } - } - ] - }, - "jobs": [ - { - "autoscaling": { - "max_instance_count": 0, # Optional. The - maximum amount of instances for this component. Must be more than - min_instance_count. - "metrics": { - "cpu": { - "percent": 80 # Optional. - Default value is 80. The average target CPU utilization - for the component. - } - }, - "min_instance_count": 0 # Optional. The - minimum amount of instances for this component. Must be less than - max_instance_count. - }, - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "build_command": "str", # Optional. An optional - build command to run while building this component from source. - "dockerfile_path": "str", # Optional. The path to - the Dockerfile relative to the root of the repo. If set, it will be - used to build this component. 
Otherwise, App Platform will attempt to - build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full list, - please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable name. - Required. - "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. + "source_dir": "str" # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. } ], - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. 
The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "image": { - "deploy_on_push": { - "enabled": bool # Optional. Whether - to automatically deploy new images. Can only be used for - images hosted in DOCR and can only be used with an image tag, - not a specific digest. - }, - "digest": "str", # Optional. The image - digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The registry - name. Must be left empty for the ``DOCR`` registry type. - "registry_credentials": "str", # Optional. - The credentials to be able to pull the image. The value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. * "$username:$access_token" for - registries of type ``DOCKER_HUB``. * "$username:$access_token" - for registries of type ``GHCR``. - "registry_type": "str", # Optional. * - DOCKER_HUB: The DockerHub container registry type. * DOCR: The - DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", "DOCR", - and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default value is - "latest". The repository tag. Defaults to ``latest`` if not - provided and no digest is provided. Cannot be specified if digest - is provided. + "ingress": { + "rules": [ + { + "component": { + "name": "str", # The + name of the component to route to. Required. + "preserve_path_prefix": "str", # Optional. An + optional flag to preserve the path that is forwarded + to the backend service. By default, the HTTP request + path will be trimmed from the left when forwarded to + the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. Note: this is not + applicable for Functions Components and is mutually + exclusive with ``rewrite``. 
+ "rewrite": "str" # + Optional. An optional field that will rewrite the + path of the component to be what is specified here. + By default, the HTTP request path will be trimmed + from the left when forwarded to the component. For + example, a component with ``path=/api`` will have + requests to ``/api/list`` trimmed to ``/list``. If + you specified the rewrite to be ``/v1/``"" , requests + to ``/api/list`` would be rewritten to ``/v1/list``. + Note: this is mutually exclusive with + ``preserve_path_prefix``. + }, + "cors": { + "allow_credentials": + bool, # Optional. Whether browsers should expose the + response to the client-side JavaScript code when the + request"u2019s credentials mode is include. This + configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # + Optional. The set of allowed HTTP request + headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # + Optional. The set of allowed HTTP methods. This + configures the ``Access-Control-Allow-Methods`` + header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. Exact string + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "prefix": "str", # Optional. Prefix-based + match. Only 1 of ``exact``"" , ``prefix``"" , + or ``regex`` must be set. + "regex": "str" # Optional. RE2 style + regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For + more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # + Optional. The set of HTTP response headers that + browsers are allowed to access. This configures + the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # + Optional. An optional duration specifying how long + browsers can cache the results of a preflight + request. This configures the + ``Access-Control-Max-Age`` header. 
+ }, + "match": { + "authority": { + "exact": + "str" # Required. + }, + "path": { + "prefix": + "str" # Prefix-based match. For example, + ``/api`` will match ``/api``"" , ``/api/``"" , + and any nested paths such as + ``/api/v1/endpoint``. Required. + } + }, + "redirect": { + "authority": "str", + # Optional. The authority/host to redirect to. This + can be a hostname or IP address. Note: use ``port`` + to set the port. + "port": 0, # + Optional. The port to redirect to. + "redirect_code": 0, + # Optional. The redirect code to use. Defaults to + ``302``. Supported values are 300, 301, 302, 303, + 304, 307, 308. + "scheme": "str", # + Optional. The scheme to redirect to. Supported values + are ``http`` or ``https``. Default: ``https``. + "uri": "str" # + Optional. An optional URI path to redirect to. Note: + if this is specified the whole URI of the original + request will be overwritten to this value, + irrespective of the original request URI being + matched. + } + } + ] }, - "instance_count": 1, # Optional. Default value is 1. - The amount of instances that this component should be scaled to. - Default: 1. Must not be set if autoscaling is used. - "instance_size_slug": {}, - "kind": "UNSPECIFIED", # Optional. Default value is - "UNSPECIFIED". * UNSPECIFIED: Default job type, will auto-complete to - POST_DEPLOY kind. * PRE_DEPLOY: Indicates a job that runs before an - app deployment. * POST_DEPLOY: Indicates a job that runs after an app - deployment. * FAILED_DEPLOY: Indicates a job that runs after a - component fails to deploy. Known values are: "UNSPECIFIED", - "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY". - "log_destinations": [ + "jobs": [ { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # Optional. - Logtail token. - }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. 
Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. + "autoscaling": { + "max_instance_count": 0, # + Optional. The maximum amount of instances for this + component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 + # Optional. Default value is 80. The average + target CPU utilization for the component. + } }, - "cluster_name": "str", # - Optional. The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. Default value is "logs". The index name to use - for the logs. If not set, the default index name is - "logs". + "min_instance_count": 0 # + Optional. The minimum amount of instances for this + component. Must be less than max_instance_count. }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } - } - ], - "name": "str", # Optional. The name. Must be unique - across all components within the same app. - "run_command": "str", # Optional. An optional run - command to override the component's default. - "source_dir": "str", # Optional. An optional path to - the working directory to use for the build. For Dockerfile builds, - this will be used as the build context. Must be relative to the root - of the repo. - "termination": { - "grace_period_seconds": 0 # Optional. The - number of seconds to wait between sending a TERM signal to a - container and issuing a KILL which causes immediate shutdown. - (Default 120). 
- } - } - ], - "maintenance": { - "archive": bool, # Optional. Indicates whether the app - should be archived. Setting this to true implies that enabled is set to - true. - "enabled": bool, # Optional. Indicates whether maintenance - mode should be enabled for the app. - "offline_page_url": "str" # Optional. A custom offline page - to display when maintenance mode is enabled or the app is archived. - }, - "region": "str", # Optional. The slug form of the geographical - origin of the app. Default: ``nearest available``. Known values are: "atl", - "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", "sgp", and "syd". - "services": [ - { - "autoscaling": { - "max_instance_count": 0, # Optional. The - maximum amount of instances for this component. Must be more than - min_instance_count. - "metrics": { - "cpu": { - "percent": 80 # Optional. - Default value is 80. The average target CPU utilization - for the component. - } - }, - "min_instance_count": 0 # Optional. The - minimum amount of instances for this component. Must be less than - max_instance_count. - }, - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "build_command": "str", # Optional. An optional - build command to run while building this component from source. - "cors": { - "allow_credentials": bool, # Optional. - Whether browsers should expose the response to the client-side - JavaScript code when the request"u2019s credentials mode is - include. This configures the ``Access-Control-Allow-Credentials`` - header. - "allow_headers": [ - "str" # Optional. The set of allowed - HTTP request headers. This configures the - ``Access-Control-Allow-Headers`` header. - ], - "allow_methods": [ - "str" # Optional. The set of allowed - HTTP methods. 
This configures the - ``Access-Control-Allow-Methods`` header. - ], - "allow_origins": [ - { - "exact": "str", # Optional. - Exact string match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "prefix": "str", # Optional. - Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "regex": "str" # Optional. - RE2 style regex-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. For more - information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. - } - ], - "expose_headers": [ - "str" # Optional. The set of HTTP - response headers that browsers are allowed to access. This - configures the ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. An optional - duration specifying how long browsers can cache the results of a - preflight request. This configures the ``Access-Control-Max-Age`` - header. - }, - "dockerfile_path": "str", # Optional. The path to - the Dockerfile relative to the root of the repo. If set, it will be - used to build this component. Otherwise, App Platform will attempt to - build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full list, - please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable name. - Required. - "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. 
If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. - } - ], - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "health_check": { - "failure_threshold": 0, # Optional. The - number of failed health checks before considered unhealthy. - "http_path": "str", # Optional. The route - path used for the HTTP health check ping. If not set, the HTTP - health check will be disabled and a TCP health check used - instead. - "initial_delay_seconds": 0, # Optional. The - number of seconds to wait before beginning health checks. - "period_seconds": 0, # Optional. The number - of seconds to wait between health checks. - "port": 0, # Optional. The port on which the - health check will be performed. If not set, the health check will - be performed on the component's http_port. - "success_threshold": 0, # Optional. The - number of successful health checks before considered healthy. - "timeout_seconds": 0 # Optional. The number - of seconds after which the check times out. - }, - "http_port": 0, # Optional. The internal port on - which this service's run command will listen. 
Default: 8080 If there - is not an environment variable with the name ``PORT``"" , one will be - automatically added with its value set to the value of this field. - "image": { - "deploy_on_push": { - "enabled": bool # Optional. Whether - to automatically deploy new images. Can only be used for - images hosted in DOCR and can only be used with an image tag, - not a specific digest. - }, - "digest": "str", # Optional. The image - digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The registry - name. Must be left empty for the ``DOCR`` registry type. - "registry_credentials": "str", # Optional. - The credentials to be able to pull the image. The value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. * "$username:$access_token" for - registries of type ``DOCKER_HUB``. * "$username:$access_token" - for registries of type ``GHCR``. - "registry_type": "str", # Optional. * - DOCKER_HUB: The DockerHub container registry type. * DOCR: The - DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", "DOCR", - and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default value is - "latest". The repository tag. Defaults to ``latest`` if not - provided and no digest is provided. Cannot be specified if digest - is provided. - }, - "instance_count": 1, # Optional. Default value is 1. - The amount of instances that this component should be scaled to. - Default: 1. Must not be set if autoscaling is used. - "instance_size_slug": {}, - "internal_ports": [ - 0 # Optional. The ports on which this - service will listen for internal traffic. - ], - "liveness_health_check": { - "failure_threshold": 0, # Optional. The - number of failed health checks before considered unhealthy. - "http_path": "str", # Optional. The route - path used for the HTTP health check ping. 
If not set, the HTTP - health check will be disabled and a TCP health check used - instead. - "initial_delay_seconds": 0, # Optional. The - number of seconds to wait before beginning health checks. - "period_seconds": 0, # Optional. The number - of seconds to wait between health checks. - "port": 0, # Optional. The port on which the - health check will be performed. - "success_threshold": 0, # Optional. The - number of successful health checks before considered healthy. - "timeout_seconds": 0 # Optional. The number - of seconds after which the check times out. - }, - "log_destinations": [ - { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. }, - "logtail": { - "token": "str" # Optional. - Logtail token. + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. 
Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. }, - "cluster_name": "str", # - Optional. 
The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. Default value is "logs". The index name to use - for the logs. If not set, the default index name is - "logs". + "digest": "str", # Optional. + The image digest. Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * + "$username:$access_token" for registries of type + ``GHCR``. + "registry_type": "str", # + Optional. * DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } - } - ], - "name": "str", # Optional. The name. Must be unique - across all components within the same app. - "protocol": "str", # Optional. The protocol which - the service uses to serve traffic on the http_port. * ``HTTP``"" : - The app is serving the HTTP protocol. Default. * ``HTTP2``"" : The - app is serving the HTTP/2 protocol. 
Currently, this needs to be - implemented in the service by serving HTTP/2 cleartext (h2c). Known - values are: "HTTP" and "HTTP2". - "routes": [ - { - "path": "str", # Optional. - (Deprecated - Use Ingress Rules instead). An HTTP path - prefix. Paths must start with / and must be unique across all - components within an app. - "preserve_path_prefix": bool # - Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded to - the component. For example, a component with ``path=/api`` - will have requests to ``/api/list`` trimmed to ``/list``. If - this value is ``true``"" , the path will remain - ``/api/list``. - } - ], - "run_command": "str", # Optional. An optional run - command to override the component's default. - "source_dir": "str", # Optional. An optional path to - the working directory to use for the build. For Dockerfile builds, - this will be used as the build context. Must be relative to the root - of the repo. - "termination": { - "drain_seconds": 0, # Optional. The number - of seconds to wait between selecting a container instance for - termination and issuing the TERM signal. Selecting a container - instance for termination begins an asynchronous drain of new - requests on upstream load-balancers. (Default 15). - "grace_period_seconds": 0 # Optional. The - number of seconds to wait between sending a TERM signal to a - container and issuing a KILL which causes immediate shutdown. - (Default 120). - } - } - ], - "static_sites": [ - { - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "build_command": "str", # Optional. 
An optional - build command to run while building this component from source. - "catchall_document": "str", # Optional. The name of - the document to use as the fallback for any requests to documents - that are not found when serving this static site. Only 1 of - ``catchall_document`` or ``error_document`` can be set. - "cors": { - "allow_credentials": bool, # Optional. - Whether browsers should expose the response to the client-side - JavaScript code when the request"u2019s credentials mode is - include. This configures the ``Access-Control-Allow-Credentials`` - header. - "allow_headers": [ - "str" # Optional. The set of allowed - HTTP request headers. This configures the - ``Access-Control-Allow-Headers`` header. - ], - "allow_methods": [ - "str" # Optional. The set of allowed - HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. - ], - "allow_origins": [ - { - "exact": "str", # Optional. - Exact string match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "prefix": "str", # Optional. - Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "regex": "str" # Optional. - RE2 style regex-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. For more - information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. + "instance_count": 1, # Optional. + Default value is 1. The amount of instances that this + component should be scaled to. Default: 1. Must not be set if + autoscaling is used. + "instance_size_slug": {}, + "kind": "UNSPECIFIED", # Optional. + Default value is "UNSPECIFIED". * UNSPECIFIED: Default job + type, will auto-complete to POST_DEPLOY kind. * PRE_DEPLOY: + Indicates a job that runs before an app deployment. * + POST_DEPLOY: Indicates a job that runs after an app + deployment. * FAILED_DEPLOY: Indicates a job that runs after + a component fails to deploy. 
Known values are: "UNSPECIFIED", + "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY". + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. + Must be unique across all components within the same app. + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str", # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + "termination": { + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which + causes immediate shutdown. (Default 120). 
} - ], - "expose_headers": [ - "str" # Optional. The set of HTTP - response headers that browsers are allowed to access. This - configures the ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. An optional - duration specifying how long browsers can cache the results of a - preflight request. This configures the ``Access-Control-Max-Age`` - header. - }, - "dockerfile_path": "str", # Optional. The path to - the Dockerfile relative to the root of the repo. If set, it will be - used to build this component. Otherwise, App Platform will attempt to - build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full list, - please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable name. - Required. - "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. } ], - "error_document": "404.html", # Optional. Default - value is "404.html". The name of the error document to use when - serving this static site. Default: 404.html. If no such file exists - within the built assets, App Platform will supply one. - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. 
Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "image": { - "deploy_on_push": { - "enabled": bool # Optional. Whether - to automatically deploy new images. Can only be used for - images hosted in DOCR and can only be used with an image tag, - not a specific digest. - }, - "digest": "str", # Optional. The image - digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The registry - name. Must be left empty for the ``DOCR`` registry type. - "registry_credentials": "str", # Optional. - The credentials to be able to pull the image. The value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. * "$username:$access_token" for - registries of type ``DOCKER_HUB``. * "$username:$access_token" - for registries of type ``GHCR``. - "registry_type": "str", # Optional. * - DOCKER_HUB: The DockerHub container registry type. * DOCR: The - DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", "DOCR", - and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default value is - "latest". The repository tag. Defaults to ``latest`` if not - provided and no digest is provided. Cannot be specified if digest - is provided. + "maintenance": { + "archive": bool, # Optional. 
Indicates + whether the app should be archived. Setting this to true implies + that enabled is set to true. + "enabled": bool, # Optional. Indicates + whether maintenance mode should be enabled for the app. + "offline_page_url": "str" # Optional. A + custom offline page to display when maintenance mode is enabled + or the app is archived. }, - "index_document": "index.html", # Optional. Default - value is "index.html". The name of the index document to use when - serving this static site. Default: index.html. - "log_destinations": [ + "region": "str", # Optional. The slug form of the + geographical origin of the app. Default: ``nearest available``. Known + values are: "atl", "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", + "sgp", and "syd". + "services": [ { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. + "autoscaling": { + "max_instance_count": 0, # + Optional. The maximum amount of instances for this + component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 + # Optional. Default value is 80. The average + target CPU utilization for the component. + } + }, + "min_instance_count": 0 # + Optional. The minimum amount of instances for this + component. Must be less than max_instance_count. }, - "logtail": { - "token": "str" # Optional. - Logtail token. + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. 
Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to + the client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. + The set of allowed HTTP request headers. This + configures the ``Access-Control-Allow-Headers`` + header. + ], + "allow_methods": [ + "str" # Optional. + The set of allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": + "str", # Optional. Exact string match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "prefix": + "str", # Optional. Prefix-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "regex": + "str" # Optional. RE2 style regex-based match. + Only 1 of ``exact``"" , ``prefix``"" , or + ``regex`` must be set. For more information about + RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. + The set of HTTP response headers that browsers are + allowed to access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. + An optional duration specifying how long browsers can + cache the results of a preflight request. This configures + the ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. 
An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "health_check": { + "failure_threshold": 0, # + Optional. The number of failed health checks before + considered unhealthy. + "http_path": "str", # + Optional. The route path used for the HTTP health check + ping. 
If not set, the HTTP health check will be disabled + and a TCP health check used instead. + "initial_delay_seconds": 0, + # Optional. The number of seconds to wait before + beginning health checks. + "period_seconds": 0, # + Optional. The number of seconds to wait between health + checks. + "port": 0, # Optional. The + port on which the health check will be performed. If not + set, the health check will be performed on the + component's http_port. + "success_threshold": 0, # + Optional. The number of successful health checks before + considered healthy. + "timeout_seconds": 0 # + Optional. The number of seconds after which the check + times out. + }, + "http_port": 0, # Optional. The + internal port on which this service's run command will + listen. Default: 8080 If there is not an environment variable + with the name ``PORT``"" , one will be automatically added + with its value set to the value of this field. + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. }, - "cluster_name": "str", # - Optional. The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. Default value is "logs". The index name to use - for the logs. If not set, the default index name is - "logs". + "digest": "str", # Optional. + The image digest. Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. 
The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * + "$username:$access_token" for registries of type + ``GHCR``. + "registry_type": "str", # + Optional. * DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. + "instance_count": 1, # Optional. + Default value is 1. The amount of instances that this + component should be scaled to. Default: 1. Must not be set if + autoscaling is used. + "instance_size_slug": {}, + "internal_ports": [ + 0 # Optional. The ports on + which this service will listen for internal traffic. + ], + "liveness_health_check": { + "failure_threshold": 0, # + Optional. The number of failed health checks before + considered unhealthy. + "http_path": "str", # + Optional. The route path used for the HTTP health check + ping. If not set, the HTTP health check will be disabled + and a TCP health check used instead. + "initial_delay_seconds": 0, + # Optional. The number of seconds to wait before + beginning health checks. + "period_seconds": 0, # + Optional. The number of seconds to wait between health + checks. + "port": 0, # Optional. The + port on which the health check will be performed. + "success_threshold": 0, # + Optional. The number of successful health checks before + considered healthy. + "timeout_seconds": 0 # + Optional. The number of seconds after which the check + times out. 
+ }, + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. + Must be unique across all components within the same app. + "protocol": "str", # Optional. The + protocol which the service uses to serve traffic on the + http_port. * ``HTTP``"" : The app is serving the HTTP + protocol. Default. * ``HTTP2``"" : The app is serving the + HTTP/2 protocol. Currently, this needs to be implemented in + the service by serving HTTP/2 cleartext (h2c). Known values + are: "HTTP" and "HTTP2". + "routes": [ + { + "path": "str", # + Optional. (Deprecated - Use Ingress Rules instead). + An HTTP path prefix. Paths must start with / and must + be unique across all components within an app. + "preserve_path_prefix": bool # Optional. 
An optional + flag to preserve the path that is forwarded to the + backend service. By default, the HTTP request path + will be trimmed from the left when forwarded to the + component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. + } + ], + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str", # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + "termination": { + "drain_seconds": 0, # + Optional. The number of seconds to wait between selecting + a container instance for termination and issuing the TERM + signal. Selecting a container instance for termination + begins an asynchronous drain of new requests on upstream + load-balancers. (Default 15). + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which + causes immediate shutdown. (Default 120). } } ], - "name": "str", # Optional. The name. Must be unique - across all components within the same app. - "output_dir": "str", # Optional. An optional path to - where the built assets will be located, relative to the build - context. If not set, App Platform will automatically scan for these - directory names: ``_static``"" , ``dist``"" , ``public``"" , - ``build``. - "routes": [ + "static_sites": [ { - "path": "str", # Optional. - (Deprecated - Use Ingress Rules instead). An HTTP path - prefix. Paths must start with / and must be unique across all - components within an app. - "preserve_path_prefix": bool # - Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded to - the component. 
For example, a component with ``path=/api`` - will have requests to ``/api/list`` trimmed to ``/list``. If - this value is ``true``"" , the path will remain - ``/api/list``. - } - ], - "run_command": "str", # Optional. An optional run - command to override the component's default. - "source_dir": "str" # Optional. An optional path to - the working directory to use for the build. For Dockerfile builds, - this will be used as the build context. Must be relative to the root - of the repo. - } - ], - "vpc": { - "egress_ips": [ - { - "ip": "str" # Optional. The egress ips - associated with the VPC. - } - ], - "id": "str" # Optional. The ID of the VPC. - }, - "workers": [ - { - "autoscaling": { - "max_instance_count": 0, # Optional. The - maximum amount of instances for this component. Must be more than - min_instance_count. - "metrics": { - "cpu": { - "percent": 80 # Optional. - Default value is 80. The average target CPU utilization - for the component. - } - }, - "min_instance_count": 0 # Optional. The - minimum amount of instances for this component. Must be less than - max_instance_count. - }, - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "build_command": "str", # Optional. An optional - build command to run while building this component from source. - "dockerfile_path": "str", # Optional. The path to - the Dockerfile relative to the root of the repo. If set, it will be - used to build this component. Otherwise, App Platform will attempt to - build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full list, - please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable name. - Required. 
- "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. + "catchall_document": "str", # + Optional. The name of the document to use as the fallback for + any requests to documents that are not found when serving + this static site. Only 1 of ``catchall_document`` or + ``error_document`` can be set. + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to + the client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. + The set of allowed HTTP request headers. This + configures the ``Access-Control-Allow-Headers`` + header. + ], + "allow_methods": [ + "str" # Optional. + The set of allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. 
+ ], + "allow_origins": [ + { + "exact": + "str", # Optional. Exact string match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "prefix": + "str", # Optional. Prefix-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "regex": + "str" # Optional. RE2 style regex-based match. + Only 1 of ``exact``"" , ``prefix``"" , or + ``regex`` must be set. For more information about + RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. + The set of HTTP response headers that browsers are + allowed to access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. + An optional duration specifying how long browsers can + cache the results of a preflight request. This configures + the ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. 
On + following submissions, the encrypted value should be + used. + } + ], + "error_document": "404.html", # + Optional. Default value is "404.html". The name of the error + document to use when serving this static site. Default: + 404.html. If no such file exists within the built assets, App + Platform will supply one. + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. + }, + "digest": "str", # Optional. + The image digest. Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * + "$username:$access_token" for registries of type + ``GHCR``. + "registry_type": "str", # + Optional. 
* DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. + }, + "index_document": "index.html", # + Optional. Default value is "index.html". The name of the + index document to use when serving this static site. Default: + index.html. + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. + Must be unique across all components within the same app. 
+ "output_dir": "str", # Optional. An + optional path to where the built assets will be located, + relative to the build context. If not set, App Platform will + automatically scan for these directory names: ``_static``"" , + ``dist``"" , ``public``"" , ``build``. + "routes": [ + { + "path": "str", # + Optional. (Deprecated - Use Ingress Rules instead). + An HTTP path prefix. Paths must start with / and must + be unique across all components within an app. + "preserve_path_prefix": bool # Optional. An optional + flag to preserve the path that is forwarded to the + backend service. By default, the HTTP request path + will be trimmed from the left when forwarded to the + component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` + trimmed to ``/list``. If this value is ``true``"" , + the path will remain ``/api/list``. + } + ], + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str" # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. } ], - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. 
Example: - ``digitalocean/sample-golang``. - }, - "image": { - "deploy_on_push": { - "enabled": bool # Optional. Whether - to automatically deploy new images. Can only be used for - images hosted in DOCR and can only be used with an image tag, - not a specific digest. - }, - "digest": "str", # Optional. The image - digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The registry - name. Must be left empty for the ``DOCR`` registry type. - "registry_credentials": "str", # Optional. - The credentials to be able to pull the image. The value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. * "$username:$access_token" for - registries of type ``DOCKER_HUB``. * "$username:$access_token" - for registries of type ``GHCR``. - "registry_type": "str", # Optional. * - DOCKER_HUB: The DockerHub container registry type. * DOCR: The - DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", "DOCR", - and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default value is - "latest". The repository tag. Defaults to ``latest`` if not - provided and no digest is provided. Cannot be specified if digest - is provided. - }, - "instance_count": 1, # Optional. Default value is 1. - The amount of instances that this component should be scaled to. - Default: 1. Must not be set if autoscaling is used. - "instance_size_slug": {}, - "liveness_health_check": { - "failure_threshold": 0, # Optional. The - number of failed health checks before considered unhealthy. - "http_path": "str", # Optional. The route - path used for the HTTP health check ping. If not set, the HTTP - health check will be disabled and a TCP health check used - instead. - "initial_delay_seconds": 0, # Optional. The - number of seconds to wait before beginning health checks. - "period_seconds": 0, # Optional. 
The number - of seconds to wait between health checks. - "port": 0, # Optional. The port on which the - health check will be performed. - "success_threshold": 0, # Optional. The - number of successful health checks before considered healthy. - "timeout_seconds": 0 # Optional. The number - of seconds after which the check times out. + "vpc": { + "egress_ips": [ + { + "ip": "str" # Optional. The + egress ips associated with the VPC. + } + ], + "id": "str" # Optional. The ID of the VPC. }, - "log_destinations": [ + "workers": [ { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. + "autoscaling": { + "max_instance_count": 0, # + Optional. The maximum amount of instances for this + component. Must be more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 + # Optional. Default value is 80. The average + target CPU utilization for the component. + } + }, + "min_instance_count": 0 # + Optional. The minimum amount of instances for this + component. Must be less than max_instance_count. }, - "logtail": { - "token": "str" # Optional. - Logtail token. + "bitbucket": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. + "build_command": "str", # Optional. + An optional build command to run while building this + component from source. 
+ "dockerfile_path": "str", # + Optional. The path to the Dockerfile relative to the root of + the repo. If set, it will be used to build this component. + Otherwise, App Platform will attempt to build it using + buildpacks. + "environment_slug": "str", # + Optional. An environment slug describing the type of this + app. For a full list, please refer to `the product + documentation + `_. + "envs": [ + { + "key": "str", # The + variable name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only + at run-time * BUILD_TIME: Made available only at + build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # + Optional. The value. If the type is ``SECRET``"" , + the value will be encrypted on first submission. On + following submissions, the encrypted value should be + used. + } + ], + "git": { + "branch": "str", # Optional. + The name of the branch to use. + "repo_clone_url": "str" # + Optional. The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. + The name of the branch to use. + "deploy_on_push": bool, # + Optional. Whether to automatically deploy new commits + made to the repo. + "repo": "str" # Optional. + The name of the repo in the format owner/repo. 
Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # + Optional. Whether to automatically deploy new images. + Can only be used for images hosted in DOCR and can + only be used with an image tag, not a specific + digest. }, - "cluster_name": "str", # - Optional. The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. Default value is "logs". The index name to use - for the logs. If not set, the default index name is - "logs". + "digest": "str", # Optional. + The image digest. Cannot be specified if tag is provided. + "registry": "str", # + Optional. The registry name. Must be left empty for the + ``DOCR`` registry type. + "registry_credentials": + "str", # Optional. The credentials to be able to pull + the image. The value will be encrypted on first + submission. On following submissions, the encrypted value + should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * + "$username:$access_token" for registries of type + ``GHCR``. + "registry_type": "str", # + Optional. * DOCKER_HUB: The DockerHub container registry + type. * DOCR: The DigitalOcean container registry type. * + GHCR: The Github container registry type. Known values + are: "DOCKER_HUB", "DOCR", and "GHCR". + "repository": "str", # + Optional. The repository name. + "tag": "latest" # Optional. + Default value is "latest". The repository tag. Defaults + to ``latest`` if not provided and no digest is provided. + Cannot be specified if digest is provided. }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. + "instance_count": 1, # Optional. + Default value is 1. 
The amount of instances that this + component should be scaled to. Default: 1. Must not be set if + autoscaling is used. + "instance_size_slug": {}, + "liveness_health_check": { + "failure_threshold": 0, # + Optional. The number of failed health checks before + considered unhealthy. + "http_path": "str", # + Optional. The route path used for the HTTP health check + ping. If not set, the HTTP health check will be disabled + and a TCP health check used instead. + "initial_delay_seconds": 0, + # Optional. The number of seconds to wait before + beginning health checks. + "period_seconds": 0, # + Optional. The number of seconds to wait between health + checks. + "port": 0, # Optional. The + port on which the health check will be performed. + "success_threshold": 0, # + Optional. The number of successful health checks before + considered healthy. + "timeout_seconds": 0 # + Optional. The number of seconds after which the check + times out. + }, + "log_destinations": [ + { + "name": "str", # + Required. + "datadog": { + "api_key": + "str", # Datadog API key. Required. + "endpoint": + "str" # Optional. Datadog HTTP log intake + endpoint. + }, + "logtail": { + "token": + "str" # Optional. Logtail token. + }, + "open_search": { + "basic_auth": + { + "password": "str", # Optional. Password for + user defined in User. Is required when + ``endpoint`` is set. Cannot be set if using a + DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # Optional. Username to + authenticate with. Only required when + ``endpoint`` is set. Defaults to ``doadmin`` + when ``cluster_name`` is set. + }, + "cluster_name": "str", # Optional. The name of a + DigitalOcean DBaaS OpenSearch cluster to use as a + log forwarding destination. Cannot be specified + if ``endpoint`` is also specified. + "endpoint": + "str", # Optional. OpenSearch API Endpoint. Only + HTTPS is supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. 
+ "index_name": + "logs" # Optional. Default value is "logs". The + index name to use for the logs. If not set, the + default index name is "logs". + }, + "papertrail": { + "endpoint": + "str" # Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. + Must be unique across all components within the same app. + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str", # Optional. An + optional path to the working directory to use for the build. + For Dockerfile builds, this will be used as the build + context. Must be relative to the root of the repo. + "termination": { + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which + causes immediate shutdown. (Default 120). } } - ], - "name": "str", # Optional. The name. Must be unique - across all components within the same app. - "run_command": "str", # Optional. An optional run - command to override the component's default. - "source_dir": "str", # Optional. An optional path to - the working directory to use for the build. For Dockerfile builds, - this will be used as the build context. Must be relative to the root - of the repo. - "termination": { - "grace_period_seconds": 0 # Optional. The - number of seconds to wait between sending a TERM signal to a - container and issuing a KILL which causes immediate shutdown. - (Default 120). + ] + }, + "static_sites": [ + { + "name": "str", # Optional. The name of this + static site. + "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this static + site. } - } - ] + ], + "tier_slug": "str", # Optional. The current pricing tier + slug of the deployment. + "updated_at": "2020-02-20 00:00:00", # Optional. When the + deployment was last updated. + "workers": [ + { + "name": "str", # Optional. The name of this + worker. 
+ "source_commit_hash": "str" # Optional. The + commit hash of the repository that was used to build this worker. + } + ] + }, + "deployment_id": "str", # Optional. For deployment events, this is + the same as the deployment's ID. For autoscaling events, this is the + deployment that was autoscaled. + "id": "str", # Optional. The ID of the event (UUID). + "type": "str" # Optional. The type of event. Known values are: + "UNKNOWN", "DEPLOYMENT", and "AUTOSCALING". } } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_apps_cancel_event_request( + app_id=app_id, + event_id=event_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = 
self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore @distributed_trace - def validate_app_spec(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: + def get_event_logs( + self, + app_id: str, + event_id: str, + *, + follow: Optional[bool] = None, + type: str = "UNSPECIFIED", + pod_connection_timeout: Optional[str] = None, + **kwargs: Any, + ) -> JSON: # pylint: disable=line-too-long - """Propose an App Spec. + """Retrieve Event Logs. - To propose and validate a spec for a new or existing app, send a POST request to the - ``/v2/apps/propose`` endpoint. The request returns some information about the proposed app, - including app cost and upgrade cost. If an existing app ID is specified, the app spec is - treated as a proposed update to the existing app. + Retrieve the logs of an autoscaling event for an app. - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] + :param app_id: The app ID. Required. + :type app_id: str + :param event_id: The event ID. Required. + :type event_id: str + :keyword follow: Whether the logs should follow live updates. Default value is None. + :paramtype follow: bool + :keyword type: The type of logs to retrieve + + + * BUILD: Build-time logs + * DEPLOY: Deploy-time logs + * RUN: Live run-time logs + * RUN_RESTARTED: Logs of crashed/restarted instances during runtime + * AUTOSCALE_EVENT: Logs of an autoscaling event (requires event_id). Known values are: + "UNSPECIFIED", "BUILD", "DEPLOY", "RUN", "RUN_RESTARTED", and "AUTOSCALE_EVENT". Default value + is "UNSPECIFIED". + :paramtype type: str + :keyword pod_connection_timeout: An optional time duration to wait if the underlying component + instance is not immediately available. 
Default: ``3m``. Default value is None. + :paramtype pod_connection_timeout: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -104213,247 +105967,720 @@ def validate_app_spec(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "spec": { - "name": "str", # The name of the app. Must be unique across all apps - in the same account. Required. - "databases": [ - { - "name": "str", # The database's name. The name must - be unique across all components within the same app and cannot use - capital letters. Required. - "cluster_name": "str", # Optional. The name of the - underlying DigitalOcean DBaaS cluster. This is required for - production databases. For dev databases, if cluster_name is not set, - a new cluster will be provisioned. - "db_name": "str", # Optional. The name of the MySQL - or PostgreSQL database to configure. - "db_user": "str", # Optional. The name of the MySQL - or PostgreSQL user to configure. - "engine": "UNSET", # Optional. Default value is - "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * MONGODB: - MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey. - Known values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", - "KAFKA", "OPENSEARCH", and "VALKEY". - "production": bool, # Optional. Whether this is a - production or dev database. - "version": "str" # Optional. The version of the - database engine. - } - ], - "disable_edge_cache": False, # Optional. Default value is False. .. - role:: raw-html-m2r(raw) :format: html If set to ``true``"" , the app - will **not** be cached at the edge (CDN). Enable this option if you want to - manage CDN configuration yourself"u2014whether by using an external CDN - provider or by handling static content and caching within your app. 
This - setting is also recommended for apps that require real-time data or serve - dynamic content, such as those using Server-Sent Events (SSE) over GET, or - hosting an MCP (Model Context Protocol) Server that utilizes SSE."" - :raw-html-m2r:`
` **Note:** This feature is not available for static site - components."" :raw-html-m2r:`
` For more information, see `Disable CDN - Cache - `_. - "disable_email_obfuscation": False, # Optional. Default value is - False. If set to ``true``"" , email addresses in the app will not be - obfuscated. This is useful for apps that require email addresses to be - visible (in the HTML markup). - "domains": [ - { - "domain": "str", # The hostname for the domain. - Required. - "minimum_tls_version": "str", # Optional. The - minimum version of TLS a client application can use to access - resources for the domain. Must be one of the following values - wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values are: - "1.2" and "1.3". - "type": "UNSPECIFIED", # Optional. Default value is - "UNSPECIFIED". * DEFAULT: The default ``.ondigitalocean.app`` domain - assigned to this app * PRIMARY: The primary domain for this app that - is displayed as the default in the control panel, used in bindable - environment variables, and any other places that reference an app's - live URL. Only one domain may be set as primary. * ALIAS: A - non-primary domain. Known values are: "UNSPECIFIED", "DEFAULT", - "PRIMARY", and "ALIAS". - "wildcard": bool, # Optional. Indicates whether the - domain includes all sub-domains, in addition to the given domain. - "zone": "str" # Optional. Optional. If the domain - uses DigitalOcean DNS and you would like App Platform to - automatically manage it for you, set this to the name of the domain - on your account. For example, If the domain you are adding is - ``app.domain.com``"" , the zone could be ``domain.com``. - } - ], - "egress": { - "type": "AUTOASSIGN" # Optional. Default value is - "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and - "DEDICATED_IP". - }, - "enhanced_threat_control_enabled": False, # Optional. Default value - is False. If set to ``true``"" , suspicious requests will go through - additional security checks to help mitigate layer 7 DDoS attacks. - "functions": [ - { - "name": "str", # The name. 
Must be unique across all - components within the same app. Required. - "alerts": [ - { - "disabled": bool, # Optional. Is the - alert disabled?. - "operator": "UNSPECIFIED_OPERATOR", - # Optional. Default value is "UNSPECIFIED_OPERATOR". Known - values are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and - "LESS_THAN". - "rule": "UNSPECIFIED_RULE", # - Optional. Default value is "UNSPECIFIED_RULE". Known values - are: "UNSPECIFIED_RULE", "CPU_UTILIZATION", - "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED", - "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", - "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", - "FUNCTIONS_ACTIVATION_COUNT", - "FUNCTIONS_AVERAGE_DURATION_MS", - "FUNCTIONS_ERROR_RATE_PER_MINUTE", - "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", - and "FUNCTIONS_GB_RATE_PER_SECOND". - "value": 0.0, # Optional. Threshold - value for alert. - "window": "UNSPECIFIED_WINDOW" # - Optional. Default value is "UNSPECIFIED_WINDOW". Known values - are: "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", - "THIRTY_MINUTES", and "ONE_HOUR". - } - ], - "bitbucket": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "cors": { - "allow_credentials": bool, # Optional. - Whether browsers should expose the response to the client-side - JavaScript code when the request"u2019s credentials mode is - include. This configures the ``Access-Control-Allow-Credentials`` - header. - "allow_headers": [ - "str" # Optional. The set of allowed - HTTP request headers. This configures the - ``Access-Control-Allow-Headers`` header. - ], - "allow_methods": [ - "str" # Optional. The set of allowed - HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. - ], - "allow_origins": [ - { - "exact": "str", # Optional. 
- Exact string match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "prefix": "str", # Optional. - Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" - , or ``regex`` must be set. - "regex": "str" # Optional. - RE2 style regex-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. For more - information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. - } - ], - "expose_headers": [ - "str" # Optional. The set of HTTP - response headers that browsers are allowed to access. This - configures the ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. An optional - duration specifying how long browsers can cache the results of a - preflight request. This configures the ``Access-Control-Max-Age`` - header. - }, - "envs": [ - { - "key": "str", # The variable name. - Required. - "scope": "RUN_AND_BUILD_TIME", # - Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: - Made available only at run-time * BUILD_TIME: Made available - only at build-time * RUN_AND_BUILD_TIME: Made available at - both build and run-time. Known values are: "UNSET", - "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # Optional. - Default value is "GENERAL". * GENERAL: A plain-text - environment variable * SECRET: A secret encrypted environment - variable. Known values are: "GENERAL" and "SECRET". - "value": "str" # Optional. The - value. If the type is ``SECRET``"" , the value will be - encrypted on first submission. On following submissions, the - encrypted value should be used. - } - ], - "git": { - "branch": "str", # Optional. The name of the - branch to use. - "repo_clone_url": "str" # Optional. The - clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. 
Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The name of the - branch to use. - "deploy_on_push": bool, # Optional. Whether - to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name of the - repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "log_destinations": [ - { - "name": "str", # Required. - "datadog": { - "api_key": "str", # Datadog - API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # Optional. - Logtail token. - }, - "open_search": { - "basic_auth": { - "password": "str", # - Optional. Password for user defined in User. Is - required when ``endpoint`` is set. Cannot be set if - using a DigitalOcean DBaaS OpenSearch cluster. - "user": "str" # - Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": "str", # - Optional. The name of a DigitalOcean DBaaS OpenSearch - cluster to use as a log forwarding destination. Cannot be - specified if ``endpoint`` is also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: https://:code:``::code:``. - Cannot be specified if ``cluster_name`` is also - specified. - "index_name": "logs" # - Optional. Default value is "logs". The index name to use - for the logs. If not set, the default index name is + # response body for status code(s): 200 + response == { + "historic_urls": [ + "str" # Optional. A list of URLs to archived log files. + ], + "live_url": "str" # Optional. A URL of the real-time live logs. This URL may + use either the ``https://`` or ``wss://`` protocols and will keep pushing live + logs as they become available. 
+ } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_apps_get_event_logs_request( + app_id=app_id, + event_id=event_id, + follow=follow, + type=type, + pod_connection_timeout=pod_connection_timeout, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", 
response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def list_instance_sizes(self, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """List Instance Sizes. + + List all instance sizes for ``service``\\ , ``worker``\\ , and ``job`` components. + + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "discount_percent": 0.0, # Optional. + "instance_sizes": [ + { + "bandwidth_allowance_gib": "str", # Optional. The bandwidth + allowance in GiB for the instance size. + "cpu_type": "UNSPECIFIED", # Optional. Default value is + "UNSPECIFIED". * SHARED: Shared vCPU cores * DEDICATED: Dedicated vCPU + cores. Known values are: "UNSPECIFIED", "SHARED", and "DEDICATED". + "cpus": "str", # Optional. The number of allotted vCPU + cores. + "deprecation_intent": bool, # Optional. Indicates if the + instance size is intended for deprecation. + "memory_bytes": "str", # Optional. 
The allotted memory in + bytes. + "name": "str", # Optional. A human-readable name of the + instance size. + "scalable": bool, # Optional. Indicates if the instance size + can enable autoscaling. + "single_instance_only": bool, # Optional. Indicates if the + instance size allows more than one instance. + "slug": "str", # Optional. The slug of the instance size. + "tier_downgrade_to": "str", # Optional. The slug of the + corresponding downgradable instance size on the lower tier. + "tier_slug": "str", # Optional. The slug of the tier to + which this instance size belongs. + "tier_upgrade_to": "str", # Optional. The slug of the + corresponding upgradable instance size on the higher tier. + "usd_per_month": "str", # Optional. The cost of this + instance size in USD per month. + "usd_per_second": "str" # Optional. The cost of this + instance size in USD per second. + } + ] + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_apps_list_instance_sizes_request( + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + 
raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def get_instance_size(self, slug: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Retrieve an Instance Size. + + Retrieve information about a specific instance size for ``service``\\ , ``worker``\\ , and + ``job`` components. + + :param slug: The slug of the instance size. Required. + :type slug: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "instance_size": { + "bandwidth_allowance_gib": "str", # Optional. The bandwidth + allowance in GiB for the instance size. + "cpu_type": "UNSPECIFIED", # Optional. Default value is + "UNSPECIFIED". * SHARED: Shared vCPU cores * DEDICATED: Dedicated vCPU + cores. Known values are: "UNSPECIFIED", "SHARED", and "DEDICATED". + "cpus": "str", # Optional. The number of allotted vCPU cores. + "deprecation_intent": bool, # Optional. Indicates if the instance + size is intended for deprecation. + "memory_bytes": "str", # Optional. The allotted memory in bytes. + "name": "str", # Optional. A human-readable name of the instance + size. + "scalable": bool, # Optional. Indicates if the instance size can + enable autoscaling. + "single_instance_only": bool, # Optional. 
Indicates if the instance + size allows more than one instance. + "slug": "str", # Optional. The slug of the instance size. + "tier_downgrade_to": "str", # Optional. The slug of the + corresponding downgradable instance size on the lower tier. + "tier_slug": "str", # Optional. The slug of the tier to which this + instance size belongs. + "tier_upgrade_to": "str", # Optional. The slug of the corresponding + upgradable instance size on the higher tier. + "usd_per_month": "str", # Optional. The cost of this instance size + in USD per month. + "usd_per_second": "str" # Optional. The cost of this instance size + in USD per second. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_apps_get_instance_size_request( + slug=slug, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + 
"int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def list_regions(self, **kwargs: Any) -> JSON: + """List App Regions. + + List all regions supported by App Platform. + + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "regions": [ + { + "continent": "str", # Optional. The continent that this + region is in. + "data_centers": [ + "str" # Optional. Data centers that are in this + region. + ], + "default": bool, # Optional. Whether or not the region is + presented as the default. + "disabled": bool, # Optional. Whether or not the region is + open for new apps. + "flag": "str", # Optional. The flag of this region. + "label": "str", # Optional. A human-readable name of the + region. + "reason": "str", # Optional. Reason that this region is not + available. + "slug": "str" # Optional. The slug form of the region name. 
+ } + ] + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_apps_list_regions_request( + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def validate_app_spec( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Propose an App Spec. 
+ + To propose and validate a spec for a new or existing app, send a POST request to the + ``/v2/apps/propose`` endpoint. The request returns some information about the proposed app, + including app cost and upgrade cost. If an existing app ID is specified, the app spec is + treated as a proposed update to the existing app. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "spec": { + "name": "str", # The name of the app. Must be unique across all apps + in the same account. Required. + "databases": [ + { + "name": "str", # The database's name. The name must + be unique across all components within the same app and cannot use + capital letters. Required. + "cluster_name": "str", # Optional. The name of the + underlying DigitalOcean DBaaS cluster. This is required for + production databases. For dev databases, if cluster_name is not set, + a new cluster will be provisioned. + "db_name": "str", # Optional. The name of the MySQL + or PostgreSQL database to configure. + "db_user": "str", # Optional. The name of the MySQL + or PostgreSQL user to configure. + "engine": "UNSET", # Optional. Default value is + "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * MONGODB: + MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey. + Known values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", + "KAFKA", "OPENSEARCH", and "VALKEY". + "production": bool, # Optional. Whether this is a + production or dev database. + "version": "str" # Optional. The version of the + database engine. + } + ], + "disable_edge_cache": False, # Optional. Default value is False. .. 
+ role:: raw-html-m2r(raw) :format: html If set to ``true``"" , the app + will **not** be cached at the edge (CDN). Enable this option if you want to + manage CDN configuration yourself"u2014whether by using an external CDN + provider or by handling static content and caching within your app. This + setting is also recommended for apps that require real-time data or serve + dynamic content, such as those using Server-Sent Events (SSE) over GET, or + hosting an MCP (Model Context Protocol) Server that utilizes SSE."" + :raw-html-m2r:`
` **Note:** This feature is not available for static site + components."" :raw-html-m2r:`
` For more information, see `Disable CDN + Cache + `_. + "disable_email_obfuscation": False, # Optional. Default value is + False. If set to ``true``"" , email addresses in the app will not be + obfuscated. This is useful for apps that require email addresses to be + visible (in the HTML markup). + "domains": [ + { + "domain": "str", # The hostname for the domain. + Required. + "minimum_tls_version": "str", # Optional. The + minimum version of TLS a client application can use to access + resources for the domain. Must be one of the following values + wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values are: + "1.2" and "1.3". + "type": "UNSPECIFIED", # Optional. Default value is + "UNSPECIFIED". * DEFAULT: The default ``.ondigitalocean.app`` domain + assigned to this app * PRIMARY: The primary domain for this app that + is displayed as the default in the control panel, used in bindable + environment variables, and any other places that reference an app's + live URL. Only one domain may be set as primary. * ALIAS: A + non-primary domain. Known values are: "UNSPECIFIED", "DEFAULT", + "PRIMARY", and "ALIAS". + "wildcard": bool, # Optional. Indicates whether the + domain includes all sub-domains, in addition to the given domain. + "zone": "str" # Optional. Optional. If the domain + uses DigitalOcean DNS and you would like App Platform to + automatically manage it for you, set this to the name of the domain + on your account. For example, If the domain you are adding is + ``app.domain.com``"" , the zone could be ``domain.com``. + } + ], + "egress": { + "type": "AUTOASSIGN" # Optional. Default value is + "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and + "DEDICATED_IP". + }, + "enhanced_threat_control_enabled": False, # Optional. Default value + is False. If set to ``true``"" , suspicious requests will go through + additional security checks to help mitigate layer 7 DDoS attacks. + "functions": [ + { + "name": "str", # The name. 
Must be unique across all + components within the same app. Required. + "alerts": [ + { + "disabled": bool, # Optional. Is the + alert disabled?. + "operator": "UNSPECIFIED_OPERATOR", + # Optional. Default value is "UNSPECIFIED_OPERATOR". Known + values are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and + "LESS_THAN". + "rule": "UNSPECIFIED_RULE", # + Optional. Default value is "UNSPECIFIED_RULE". Known values + are: "UNSPECIFIED_RULE", "CPU_UTILIZATION", + "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED", + "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", + "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", + "FUNCTIONS_ACTIVATION_COUNT", + "FUNCTIONS_AVERAGE_DURATION_MS", + "FUNCTIONS_ERROR_RATE_PER_MINUTE", + "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", + and "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, # Optional. Threshold + value for alert. + "window": "UNSPECIFIED_WINDOW" # + Optional. Default value is "UNSPECIFIED_WINDOW". Known values + are: "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", + "THIRTY_MINUTES", and "ONE_HOUR". + } + ], + "bitbucket": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "cors": { + "allow_credentials": bool, # Optional. + Whether browsers should expose the response to the client-side + JavaScript code when the request"u2019s credentials mode is + include. This configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # Optional. The set of allowed + HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # Optional. The set of allowed + HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. 
+ Exact string match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "prefix": "str", # Optional. + Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "regex": "str" # Optional. + RE2 style regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For more + information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. The set of HTTP + response headers that browsers are allowed to access. This + configures the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. An optional + duration specifying how long browsers can cache the results of a + preflight request. This configures the ``Access-Control-Max-Age`` + header. + }, + "envs": [ + { + "key": "str", # The variable name. + Required. + "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. The + value. If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The name of the + branch to use. + "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. 
Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. + Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # + Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # + Optional. The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. + Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is "logs". 
}, "papertrail": { @@ -106782,276 +109009,22 @@ def validate_app_spec(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON } } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[JSON] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body - - _request = build_apps_validate_app_spec_request( - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", 
response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @distributed_trace - def list_alerts(self, app_id: str, **kwargs: Any) -> JSON: - # pylint: disable=line-too-long - """List all app alerts. - - List alerts associated to the app and any components. This includes configuration information - about the alerts including emails, slack webhooks, and triggering events or conditions. - - :param app_id: The app ID. Required. - :type app_id: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "alerts": [ - { - "component_name": "str", # Optional. Name of component the - alert belongs to. - "emails": [ - "" # Optional. Default value is "". Emails for - alerts to go to. - ], - "id": "str", # Optional. The ID of the alert. - "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". - Known values are: "UNKNOWN", "PENDING", "CONFIGURING", "ACTIVE", and - "ERROR". - "progress": { - "steps": [ - { - "ended_at": "2020-02-20 00:00:00", # - Optional. The start time of this step. - "name": "str", # Optional. The name - of this step. - "reason": { - "code": "str", # Optional. - The error code. - "message": "str" # Optional. - The error message. - }, - "started_at": "2020-02-20 00:00:00", - # Optional. The start time of this step. - "status": "UNKNOWN" # Optional. - Default value is "UNKNOWN". Known values are: "UNKNOWN", - "PENDING", "RUNNING", "ERROR", and "SUCCESS". - } - ] - }, - "slack_webhooks": [ - { - "channel": "str", # Optional. Name of the - Slack Webhook Channel. - "url": "str" # Optional. URL of the Slack - webhook. - } - ], - "spec": { - "disabled": bool, # Optional. Is the alert - disabled?. 
- "operator": "UNSPECIFIED_OPERATOR", # Optional. - Default value is "UNSPECIFIED_OPERATOR". Known values are: - "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN". - "rule": "UNSPECIFIED_RULE", # Optional. Default - value is "UNSPECIFIED_RULE". Known values are: "UNSPECIFIED_RULE", - "CPU_UTILIZATION", "MEM_UTILIZATION", "RESTART_COUNT", - "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", "DOMAIN_FAILED", - "DOMAIN_LIVE", "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", - "FUNCTIONS_ACTIVATION_COUNT", "FUNCTIONS_AVERAGE_DURATION_MS", - "FUNCTIONS_ERROR_RATE_PER_MINUTE", "FUNCTIONS_AVERAGE_WAIT_TIME_MS", - "FUNCTIONS_ERROR_COUNT", and "FUNCTIONS_GB_RATE_PER_SECOND". - "value": 0.0, # Optional. Threshold value for alert. - "window": "UNSPECIFIED_WINDOW" # Optional. Default - value is "UNSPECIFIED_WINDOW". Known values are: - "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", - "THIRTY_MINUTES", and "ONE_HOUR". - } - } - ] - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_apps_list_alerts_request( - app_id=app_id, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - 
"int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore @overload - def assign_alert_destinations( - self, - app_id: str, - alert_id: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any, + def validate_app_spec( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update destinations for alerts. + """Propose an App Spec. - Updates the emails and slack webhook destinations for app alerts. Emails must be associated to - a user with access to the app. + To propose and validate a spec for a new or existing app, send a POST request to the + ``/v2/apps/propose`` endpoint. The request returns some information about the proposed app, + including app cost and upgrade cost. If an existing app ID is specified, the app spec is + treated as a proposed update to the existing app. - :param app_id: The app ID. Required. - :type app_id: str - :param alert_id: The alert ID. Required. - :type alert_id: str :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: JSON object @@ -107061,1173 +109034,1612 @@ def assign_alert_destinations( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "emails": [ - "" # Optional. Default value is "". - ], - "slack_webhooks": [ - { - "channel": "str", # Optional. Name of the Slack Webhook - Channel. - "url": "str" # Optional. URL of the Slack webhook. 
- } - ] - } - # response body for status code(s): 200 response == { - "alert": { - "component_name": "str", # Optional. Name of component the alert - belongs to. - "emails": [ - "" # Optional. Default value is "". Emails for alerts to go - to. - ], - "id": "str", # Optional. The ID of the alert. - "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". Known - values are: "UNKNOWN", "PENDING", "CONFIGURING", "ACTIVE", and "ERROR". - "progress": { - "steps": [ - { - "ended_at": "2020-02-20 00:00:00", # - Optional. The start time of this step. - "name": "str", # Optional. The name of this - step. - "reason": { - "code": "str", # Optional. The error - code. - "message": "str" # Optional. The - error message. - }, - "started_at": "2020-02-20 00:00:00", # - Optional. The start time of this step. - "status": "UNKNOWN" # Optional. Default - value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING", - "RUNNING", "ERROR", and "SUCCESS". - } - ] - }, - "slack_webhooks": [ + "app_cost": 0, # Optional. The monthly cost of the proposed app in USD. + "app_is_static": bool, # Optional. Indicates whether the app is a static + app. + "app_name_available": bool, # Optional. Indicates whether the app name is + available. + "app_name_suggestion": "str", # Optional. The suggested name if the proposed + app name is unavailable. + "app_tier_downgrade_cost": 0, # Optional. The monthly cost of the proposed + app in USD using the previous pricing plan tier. For example, if you propose an + app that uses the Professional tier, the ``app_tier_downgrade_cost`` field + displays the monthly cost of the app if it were to use the Basic tier. If the + proposed app already uses the lest expensive tier, the field is empty. + "existing_static_apps": "str", # Optional. The maximum number of free static + apps the account can have. We will charge you for any additional static apps. + "spec": { + "name": "str", # The name of the app. Must be unique across all apps + in the same account. Required. 
+ "databases": [ { - "channel": "str", # Optional. Name of the Slack - Webhook Channel. - "url": "str" # Optional. URL of the Slack webhook. + "name": "str", # The database's name. The name must + be unique across all components within the same app and cannot use + capital letters. Required. + "cluster_name": "str", # Optional. The name of the + underlying DigitalOcean DBaaS cluster. This is required for + production databases. For dev databases, if cluster_name is not set, + a new cluster will be provisioned. + "db_name": "str", # Optional. The name of the MySQL + or PostgreSQL database to configure. + "db_user": "str", # Optional. The name of the MySQL + or PostgreSQL user to configure. + "engine": "UNSET", # Optional. Default value is + "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * MONGODB: + MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey. + Known values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", + "KAFKA", "OPENSEARCH", and "VALKEY". + "production": bool, # Optional. Whether this is a + production or dev database. + "version": "str" # Optional. The version of the + database engine. } ], - "spec": { - "disabled": bool, # Optional. Is the alert disabled?. - "operator": "UNSPECIFIED_OPERATOR", # Optional. Default - value is "UNSPECIFIED_OPERATOR". Known values are: - "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN". - "rule": "UNSPECIFIED_RULE", # Optional. Default value is - "UNSPECIFIED_RULE". Known values are: "UNSPECIFIED_RULE", - "CPU_UTILIZATION", "MEM_UTILIZATION", "RESTART_COUNT", - "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", - "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT", - "FUNCTIONS_AVERAGE_DURATION_MS", "FUNCTIONS_ERROR_RATE_PER_MINUTE", - "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", and - "FUNCTIONS_GB_RATE_PER_SECOND". - "value": 0.0, # Optional. Threshold value for alert. - "window": "UNSPECIFIED_WINDOW" # Optional. 
Default value is - "UNSPECIFIED_WINDOW". Known values are: "UNSPECIFIED_WINDOW", - "FIVE_MINUTES", "TEN_MINUTES", "THIRTY_MINUTES", and "ONE_HOUR". - } - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - def assign_alert_destinations( - self, - app_id: str, - alert_id: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Update destinations for alerts. - - Updates the emails and slack webhook destinations for app alerts. Emails must be associated to - a user with access to the app. - - :param app_id: The app ID. Required. - :type app_id: str - :param alert_id: The alert ID. Required. - :type alert_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "alert": { - "component_name": "str", # Optional. Name of component the alert - belongs to. - "emails": [ - "" # Optional. Default value is "". Emails for alerts to go - to. + "disable_edge_cache": False, # Optional. Default value is False. .. 
+ role:: raw-html-m2r(raw) :format: html If set to ``true``"" , the app + will **not** be cached at the edge (CDN). Enable this option if you want to + manage CDN configuration yourself"u2014whether by using an external CDN + provider or by handling static content and caching within your app. This + setting is also recommended for apps that require real-time data or serve + dynamic content, such as those using Server-Sent Events (SSE) over GET, or + hosting an MCP (Model Context Protocol) Server that utilizes SSE."" + :raw-html-m2r:`
` **Note:** This feature is not available for static site + components."" :raw-html-m2r:`
` For more information, see `Disable CDN + Cache + `_. + "disable_email_obfuscation": False, # Optional. Default value is + False. If set to ``true``"" , email addresses in the app will not be + obfuscated. This is useful for apps that require email addresses to be + visible (in the HTML markup). + "domains": [ + { + "domain": "str", # The hostname for the domain. + Required. + "minimum_tls_version": "str", # Optional. The + minimum version of TLS a client application can use to access + resources for the domain. Must be one of the following values + wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values are: + "1.2" and "1.3". + "type": "UNSPECIFIED", # Optional. Default value is + "UNSPECIFIED". * DEFAULT: The default ``.ondigitalocean.app`` domain + assigned to this app * PRIMARY: The primary domain for this app that + is displayed as the default in the control panel, used in bindable + environment variables, and any other places that reference an app's + live URL. Only one domain may be set as primary. * ALIAS: A + non-primary domain. Known values are: "UNSPECIFIED", "DEFAULT", + "PRIMARY", and "ALIAS". + "wildcard": bool, # Optional. Indicates whether the + domain includes all sub-domains, in addition to the given domain. + "zone": "str" # Optional. Optional. If the domain + uses DigitalOcean DNS and you would like App Platform to + automatically manage it for you, set this to the name of the domain + on your account. For example, If the domain you are adding is + ``app.domain.com``"" , the zone could be ``domain.com``. + } ], - "id": "str", # Optional. The ID of the alert. - "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". Known - values are: "UNKNOWN", "PENDING", "CONFIGURING", "ACTIVE", and "ERROR". - "progress": { - "steps": [ - { - "ended_at": "2020-02-20 00:00:00", # - Optional. The start time of this step. - "name": "str", # Optional. The name of this - step. - "reason": { - "code": "str", # Optional. The error - code. 
- "message": "str" # Optional. The - error message. - }, - "started_at": "2020-02-20 00:00:00", # - Optional. The start time of this step. - "status": "UNKNOWN" # Optional. Default - value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING", - "RUNNING", "ERROR", and "SUCCESS". - } - ] + "egress": { + "type": "AUTOASSIGN" # Optional. Default value is + "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and + "DEDICATED_IP". }, - "slack_webhooks": [ + "enhanced_threat_control_enabled": False, # Optional. Default value + is False. If set to ``true``"" , suspicious requests will go through + additional security checks to help mitigate layer 7 DDoS attacks. + "functions": [ { - "channel": "str", # Optional. Name of the Slack - Webhook Channel. - "url": "str" # Optional. URL of the Slack webhook. + "name": "str", # The name. Must be unique across all + components within the same app. Required. + "alerts": [ + { + "disabled": bool, # Optional. Is the + alert disabled?. + "operator": "UNSPECIFIED_OPERATOR", + # Optional. Default value is "UNSPECIFIED_OPERATOR". Known + values are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and + "LESS_THAN". + "rule": "UNSPECIFIED_RULE", # + Optional. Default value is "UNSPECIFIED_RULE". Known values + are: "UNSPECIFIED_RULE", "CPU_UTILIZATION", + "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED", + "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", + "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", + "FUNCTIONS_ACTIVATION_COUNT", + "FUNCTIONS_AVERAGE_DURATION_MS", + "FUNCTIONS_ERROR_RATE_PER_MINUTE", + "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", + and "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, # Optional. Threshold + value for alert. + "window": "UNSPECIFIED_WINDOW" # + Optional. Default value is "UNSPECIFIED_WINDOW". Known values + are: "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", + "THIRTY_MINUTES", and "ONE_HOUR". + } + ], + "bitbucket": { + "branch": "str", # Optional. 
The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "cors": { + "allow_credentials": bool, # Optional. + Whether browsers should expose the response to the client-side + JavaScript code when the request"u2019s credentials mode is + include. This configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # Optional. The set of allowed + HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # Optional. The set of allowed + HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. + Exact string match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "prefix": "str", # Optional. + Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "regex": "str" # Optional. + RE2 style regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For more + information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. The set of HTTP + response headers that browsers are allowed to access. This + configures the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. An optional + duration specifying how long browsers can cache the results of a + preflight request. This configures the ``Access-Control-Max-Age`` + header. + }, + "envs": [ + { + "key": "str", # The variable name. + Required. + "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. 
Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. The + value. If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The name of the + branch to use. + "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. + Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # + Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # + Optional. 
The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. + Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is + "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "routes": [ + { + "path": "str", # Optional. + (Deprecated - Use Ingress Rules instead). An HTTP path + prefix. Paths must start with / and must be unique across all + components within an app. + "preserve_path_prefix": bool # + Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded to + the component. For example, a component with ``path=/api`` + will have requests to ``/api/list`` trimmed to ``/list``. If + this value is ``true``"" , the path will remain + ``/api/list``. + } + ], + "source_dir": "str" # Optional. An optional path to + the working directory to use for the build. For Dockerfile builds, + this will be used as the build context. Must be relative to the root + of the repo. } ], - "spec": { - "disabled": bool, # Optional. Is the alert disabled?. - "operator": "UNSPECIFIED_OPERATOR", # Optional. Default - value is "UNSPECIFIED_OPERATOR". Known values are: - "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN". - "rule": "UNSPECIFIED_RULE", # Optional. Default value is - "UNSPECIFIED_RULE". 
Known values are: "UNSPECIFIED_RULE", - "CPU_UTILIZATION", "MEM_UTILIZATION", "RESTART_COUNT", - "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", - "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT", - "FUNCTIONS_AVERAGE_DURATION_MS", "FUNCTIONS_ERROR_RATE_PER_MINUTE", - "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", and - "FUNCTIONS_GB_RATE_PER_SECOND". - "value": 0.0, # Optional. Threshold value for alert. - "window": "UNSPECIFIED_WINDOW" # Optional. Default value is - "UNSPECIFIED_WINDOW". Known values are: "UNSPECIFIED_WINDOW", - "FIVE_MINUTES", "TEN_MINUTES", "THIRTY_MINUTES", and "ONE_HOUR". - } - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace - def assign_alert_destinations( - self, app_id: str, alert_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Update destinations for alerts. - - Updates the emails and slack webhook destinations for app alerts. Emails must be associated to - a user with access to the app. - - :param app_id: The app ID. Required. - :type app_id: str - :param alert_id: The alert ID. Required. - :type alert_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. 
code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "emails": [ - "" # Optional. Default value is "". - ], - "slack_webhooks": [ - { - "channel": "str", # Optional. Name of the Slack Webhook - Channel. - "url": "str" # Optional. URL of the Slack webhook. - } - ] - } - - # response body for status code(s): 200 - response == { - "alert": { - "component_name": "str", # Optional. Name of component the alert - belongs to. - "emails": [ - "" # Optional. Default value is "". Emails for alerts to go - to. - ], - "id": "str", # Optional. The ID of the alert. - "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". Known - values are: "UNKNOWN", "PENDING", "CONFIGURING", "ACTIVE", and "ERROR". - "progress": { - "steps": [ + "ingress": { + "rules": [ { - "ended_at": "2020-02-20 00:00:00", # - Optional. The start time of this step. - "name": "str", # Optional. The name of this - step. - "reason": { - "code": "str", # Optional. The error - code. - "message": "str" # Optional. The - error message. + "component": { + "name": "str", # The name of the + component to route to. Required. + "preserve_path_prefix": "str", # + Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded to + the component. For example, a component with ``path=/api`` + will have requests to ``/api/list`` trimmed to ``/list``. If + this value is ``true``"" , the path will remain + ``/api/list``. Note: this is not applicable for Functions + Components and is mutually exclusive with ``rewrite``. + "rewrite": "str" # Optional. An + optional field that will rewrite the path of the component to + be what is specified here. By default, the HTTP request path + will be trimmed from the left when forwarded to the + component. For example, a component with ``path=/api`` will + have requests to ``/api/list`` trimmed to ``/list``. 
If you + specified the rewrite to be ``/v1/``"" , requests to + ``/api/list`` would be rewritten to ``/v1/list``. Note: this + is mutually exclusive with ``preserve_path_prefix``. }, - "started_at": "2020-02-20 00:00:00", # - Optional. The start time of this step. - "status": "UNKNOWN" # Optional. Default - value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING", - "RUNNING", "ERROR", and "SUCCESS". + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to the + client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. The set of + allowed HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # Optional. The set of + allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": "str", # + Optional. Exact string match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "prefix": "str", # + Optional. Prefix-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "regex": "str" # + Optional. RE2 style regex-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. For more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. The set of + HTTP response headers that browsers are allowed to + access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. An + optional duration specifying how long browsers can cache the + results of a preflight request. This configures the + ``Access-Control-Max-Age`` header. + }, + "match": { + "authority": { + "exact": "str" # Required. + }, + "path": { + "prefix": "str" # + Prefix-based match. 
For example, ``/api`` will match + ``/api``"" , ``/api/``"" , and any nested paths such as + ``/api/v1/endpoint``. Required. + } + }, + "redirect": { + "authority": "str", # Optional. The + authority/host to redirect to. This can be a hostname or IP + address. Note: use ``port`` to set the port. + "port": 0, # Optional. The port to + redirect to. + "redirect_code": 0, # Optional. The + redirect code to use. Defaults to ``302``. Supported values + are 300, 301, 302, 303, 304, 307, 308. + "scheme": "str", # Optional. The + scheme to redirect to. Supported values are ``http`` or + ``https``. Default: ``https``. + "uri": "str" # Optional. An optional + URI path to redirect to. Note: if this is specified the whole + URI of the original request will be overwritten to this + value, irrespective of the original request URI being + matched. + } } ] }, - "slack_webhooks": [ + "jobs": [ { - "channel": "str", # Optional. Name of the Slack - Webhook Channel. - "url": "str" # Optional. URL of the Slack webhook. - } - ], - "spec": { - "disabled": bool, # Optional. Is the alert disabled?. - "operator": "UNSPECIFIED_OPERATOR", # Optional. Default - value is "UNSPECIFIED_OPERATOR". Known values are: - "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN". - "rule": "UNSPECIFIED_RULE", # Optional. Default value is - "UNSPECIFIED_RULE". Known values are: "UNSPECIFIED_RULE", - "CPU_UTILIZATION", "MEM_UTILIZATION", "RESTART_COUNT", - "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", - "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT", - "FUNCTIONS_AVERAGE_DURATION_MS", "FUNCTIONS_ERROR_RATE_PER_MINUTE", - "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", and - "FUNCTIONS_GB_RATE_PER_SECOND". - "value": 0.0, # Optional. Threshold value for alert. - "window": "UNSPECIFIED_WINDOW" # Optional. Default value is - "UNSPECIFIED_WINDOW". 
Known values are: "UNSPECIFIED_WINDOW", - "FIVE_MINUTES", "TEN_MINUTES", "THIRTY_MINUTES", and "ONE_HOUR". - } - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[JSON] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body - - _request = build_apps_assign_alert_destinations_request( - app_id=app_id, - alert_id=alert_id, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if 
response.status_code not in [200, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @overload - def create_rollback( - self, - app_id: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Rollback App. - - Rollback an app to a previous deployment. A new deployment will be created to perform the - rollback. - The app will be pinned to the rollback deployment preventing any new deployments from being - created, - either manually or through Auto Deploy on Push webhooks. To resume deployments, the rollback - must be - either committed or reverted. 
- - It is recommended to use the Validate App Rollback endpoint to double check if the rollback is - valid and if there are any warnings. - - :param app_id: The app ID. Required. - :type app_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "deployment_id": "str", # Optional. The ID of the deployment to rollback to. - "skip_pin": bool # Optional. Whether to skip pinning the rollback - deployment. If false, the rollback deployment will be pinned and any new - deployments including Auto Deploy on Push hooks will be disabled until the - rollback is either manually committed or reverted via the CommitAppRollback or - RevertAppRollback endpoints respectively. If true, the rollback will be - immediately committed and the app will remain unpinned. - } - - # response body for status code(s): 200 - response == { - "deployment": { - "cause": "str", # Optional. What caused this deployment to be - created. - "cloned_from": "str", # Optional. The ID of a previous deployment - that this deployment was cloned from. - "created_at": "2020-02-20 00:00:00", # Optional. The creation time - of the deployment. - "functions": [ - { - "name": "str", # Optional. The name of this - functions component. - "namespace": "str", # Optional. The namespace where - the functions are deployed. - "source_commit_hash": "str" # Optional. The commit - hash of the repository that was used to build this functions - component. - } - ], - "id": "str", # Optional. The ID of the deployment. - "jobs": [ - { - "name": "str", # Optional. The name of this job. - "source_commit_hash": "str" # Optional. 
The commit - hash of the repository that was used to build this job. - } - ], - "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". Known - values are: "UNKNOWN", "PENDING_BUILD", "BUILDING", "PENDING_DEPLOY", - "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and "CANCELED". - "phase_last_updated_at": "2020-02-20 00:00:00", # Optional. When the - deployment phase was last updated. - "progress": { - "error_steps": 0, # Optional. Number of unsuccessful steps. - "pending_steps": 0, # Optional. Number of pending steps. - "running_steps": 0, # Optional. Number of currently running - steps. - "steps": [ - { - "component_name": "str", # Optional. The - component name that this step is associated with. - "ended_at": "2020-02-20 00:00:00", # - Optional. The end time of this step. - "message_base": "str", # Optional. The base - of a human-readable description of the step intended to be - combined with the component name for presentation. For example: - ``message_base`` = "Building service" ``component_name`` = "api". - "name": "str", # Optional. The name of this - step. - "reason": { - "code": "str", # Optional. The error - code. - "message": "str" # Optional. The - error message. + "autoscaling": { + "max_instance_count": 0, # Optional. The + maximum amount of instances for this component. Must be more than + min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # Optional. + Default value is 80. The average target CPU utilization + for the component. + } }, - "started_at": "2020-02-20 00:00:00", # - Optional. The start time of this step. - "status": "UNKNOWN", # Optional. Default - value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING", - "RUNNING", "ERROR", and "SUCCESS". - "steps": [ - {} # Optional. Child steps of this - step. - ] - } - ], - "success_steps": 0, # Optional. Number of successful steps. - "summary_steps": [ - { - "component_name": "str", # Optional. The - component name that this step is associated with. 
- "ended_at": "2020-02-20 00:00:00", # - Optional. The end time of this step. - "message_base": "str", # Optional. The base - of a human-readable description of the step intended to be - combined with the component name for presentation. For example: - ``message_base`` = "Building service" ``component_name`` = "api". - "name": "str", # Optional. The name of this - step. - "reason": { - "code": "str", # Optional. The error - code. - "message": "str" # Optional. The - error message. + "min_instance_count": 0 # Optional. The + minimum amount of instances for this component. Must be less than + max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An optional + build command to run while building this component from source. + "dockerfile_path": "str", # Optional. The path to + the Dockerfile relative to the root of the repo. If set, it will be + used to build this component. Otherwise, App Platform will attempt to + build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full list, + please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable name. + Required. + "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. 
Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. The + value. If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The name of the + branch to use. + "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # Optional. Whether + to automatically deploy new images. Can only be used for + images hosted in DOCR and can only be used with an image tag, + not a specific digest. }, - "started_at": "2020-02-20 00:00:00", # - Optional. The start time of this step. - "status": "UNKNOWN", # Optional. Default - value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING", - "RUNNING", "ERROR", and "SUCCESS". - "steps": [ - {} # Optional. Child steps of this - step. - ] + "digest": "str", # Optional. The image + digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The registry + name. Must be left empty for the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. + The credentials to be able to pull the image. The value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. 
* "$username:$access_token" for + registries of type ``DOCKER_HUB``. * "$username:$access_token" + for registries of type ``GHCR``. + "registry_type": "str", # Optional. * + DOCKER_HUB: The DockerHub container registry type. * DOCR: The + DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", "DOCR", + and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default value is + "latest". The repository tag. Defaults to ``latest`` if not + provided and no digest is provided. Cannot be specified if digest + is provided. + }, + "instance_count": 1, # Optional. Default value is 1. + The amount of instances that this component should be scaled to. + Default: 1. Must not be set if autoscaling is used. + "instance_size_slug": {}, + "kind": "UNSPECIFIED", # Optional. Default value is + "UNSPECIFIED". * UNSPECIFIED: Default job type, will auto-complete to + POST_DEPLOY kind. * PRE_DEPLOY: Indicates a job that runs before an + app deployment. * POST_DEPLOY: Indicates a job that runs after an app + deployment. * FAILED_DEPLOY: Indicates a job that runs after a + component fails to deploy. Known values are: "UNSPECIFIED", + "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY". + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. + Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # + Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # + Optional. 
The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. + Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is + "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. Must be unique + across all components within the same app. + "run_command": "str", # Optional. An optional run + command to override the component's default. + "source_dir": "str", # Optional. An optional path to + the working directory to use for the build. For Dockerfile builds, + this will be used as the build context. Must be relative to the root + of the repo. + "termination": { + "grace_period_seconds": 0 # Optional. The + number of seconds to wait between sending a TERM signal to a + container and issuing a KILL which causes immediate shutdown. + (Default 120). } - ], - "total_steps": 0 # Optional. Total number of steps. + } + ], + "maintenance": { + "archive": bool, # Optional. Indicates whether the app + should be archived. Setting this to true implies that enabled is set to + true. + "enabled": bool, # Optional. Indicates whether maintenance + mode should be enabled for the app. + "offline_page_url": "str" # Optional. A custom offline page + to display when maintenance mode is enabled or the app is archived. }, + "region": "str", # Optional. The slug form of the geographical + origin of the app. Default: ``nearest available``. Known values are: "atl", + "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", "sgp", and "syd". "services": [ { - "name": "str", # Optional. The name of this service. - "source_commit_hash": "str" # Optional. 
The commit - hash of the repository that was used to build this service. - } - ], - "spec": { - "name": "str", # The name of the app. Must be unique across - all apps in the same account. Required. - "databases": [ - { - "name": "str", # The database's name. The - name must be unique across all components within the same app and - cannot use capital letters. Required. - "cluster_name": "str", # Optional. The name - of the underlying DigitalOcean DBaaS cluster. This is required - for production databases. For dev databases, if cluster_name is - not set, a new cluster will be provisioned. - "db_name": "str", # Optional. The name of - the MySQL or PostgreSQL database to configure. - "db_user": "str", # Optional. The name of - the MySQL or PostgreSQL user to configure. - "engine": "UNSET", # Optional. Default value - is "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * - MONGODB: MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * - VALKEY: ValKey. Known values are: "UNSET", "MYSQL", "PG", - "REDIS", "MONGODB", "KAFKA", "OPENSEARCH", and "VALKEY". - "production": bool, # Optional. Whether this - is a production or dev database. - "version": "str" # Optional. The version of - the database engine. - } - ], - "disable_edge_cache": False, # Optional. Default value is - False. .. role:: raw-html-m2r(raw) :format: html If set to - ``true``"" , the app will **not** be cached at the edge (CDN). Enable - this option if you want to manage CDN configuration yourself"u2014whether - by using an external CDN provider or by handling static content and - caching within your app. This setting is also recommended for apps that - require real-time data or serve dynamic content, such as those using - Server-Sent Events (SSE) over GET, or hosting an MCP (Model Context - Protocol) Server that utilizes SSE."" :raw-html-m2r:`
` **Note:** This - feature is not available for static site components."" - :raw-html-m2r:`
` For more information, see `Disable CDN Cache - `_. - "disable_email_obfuscation": False, # Optional. Default - value is False. If set to ``true``"" , email addresses in the app will - not be obfuscated. This is useful for apps that require email addresses - to be visible (in the HTML markup). - "domains": [ - { - "domain": "str", # The hostname for the - domain. Required. - "minimum_tls_version": "str", # Optional. - The minimum version of TLS a client application can use to access - resources for the domain. Must be one of the following values - wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values - are: "1.2" and "1.3". - "type": "UNSPECIFIED", # Optional. Default - value is "UNSPECIFIED". * DEFAULT: The default - ``.ondigitalocean.app`` domain assigned to this app * PRIMARY: - The primary domain for this app that is displayed as the default - in the control panel, used in bindable environment variables, and - any other places that reference an app's live URL. Only one - domain may be set as primary. * ALIAS: A non-primary domain. - Known values are: "UNSPECIFIED", "DEFAULT", "PRIMARY", and - "ALIAS". - "wildcard": bool, # Optional. Indicates - whether the domain includes all sub-domains, in addition to the - given domain. - "zone": "str" # Optional. Optional. If the - domain uses DigitalOcean DNS and you would like App Platform to - automatically manage it for you, set this to the name of the - domain on your account. For example, If the domain you are - adding is ``app.domain.com``"" , the zone could be - ``domain.com``. - } - ], - "egress": { - "type": "AUTOASSIGN" # Optional. Default value is - "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and - "DEDICATED_IP". - }, - "enhanced_threat_control_enabled": False, # Optional. - Default value is False. If set to ``true``"" , suspicious requests will - go through additional security checks to help mitigate layer 7 DDoS - attacks. - "functions": [ - { - "name": "str", # The name. 
Must be unique - across all components within the same app. Required. - "alerts": [ - { - "disabled": bool, # - Optional. Is the alert disabled?. - "operator": - "UNSPECIFIED_OPERATOR", # Optional. Default value is - "UNSPECIFIED_OPERATOR". Known values are: - "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN". - "rule": "UNSPECIFIED_RULE", - # Optional. Default value is "UNSPECIFIED_RULE". Known - values are: "UNSPECIFIED_RULE", "CPU_UTILIZATION", - "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED", - "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", - "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", - "FUNCTIONS_ACTIVATION_COUNT", - "FUNCTIONS_AVERAGE_DURATION_MS", - "FUNCTIONS_ERROR_RATE_PER_MINUTE", - "FUNCTIONS_AVERAGE_WAIT_TIME_MS", - "FUNCTIONS_ERROR_COUNT", and - "FUNCTIONS_GB_RATE_PER_SECOND". - "value": 0.0, # Optional. - Threshold value for alert. - "window": - "UNSPECIFIED_WINDOW" # Optional. Default value is - "UNSPECIFIED_WINDOW". Known values are: - "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", - "THIRTY_MINUTES", and "ONE_HOUR". + "autoscaling": { + "max_instance_count": 0, # Optional. The + maximum amount of instances for this component. Must be more than + min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # Optional. + Default value is 80. The average target CPU utilization + for the component. } - ], - "bitbucket": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "cors": { - "allow_credentials": bool, # - Optional. Whether browsers should expose the response to the - client-side JavaScript code when the request"u2019s - credentials mode is include. This configures the - ``Access-Control-Allow-Credentials`` header. - "allow_headers": [ - "str" # Optional. 
The set of - allowed HTTP request headers. This configures the - ``Access-Control-Allow-Headers`` header. - ], - "allow_methods": [ - "str" # Optional. The set of - allowed HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. - ], - "allow_origins": [ - { - "exact": "str", # - Optional. Exact string match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. - "prefix": "str", # - Optional. Prefix-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. - "regex": "str" # - Optional. RE2 style regex-based match. Only 1 of - ``exact``"" , ``prefix``"" , or ``regex`` must be - set. For more information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. - } - ], - "expose_headers": [ - "str" # Optional. The set of - HTTP response headers that browsers are allowed to - access. This configures the - ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. An - optional duration specifying how long browsers can cache the - results of a preflight request. This configures the - ``Access-Control-Max-Age`` header. }, - "envs": [ - { - "key": "str", # The variable - name. Required. - "scope": - "RUN_AND_BUILD_TIME", # Optional. Default value is - "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at - run-time * BUILD_TIME: Made available only at build-time - * RUN_AND_BUILD_TIME: Made available at both build and - run-time. Known values are: "UNSET", "RUN_TIME", - "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # - Optional. Default value is "GENERAL". * GENERAL: A - plain-text environment variable * SECRET: A secret - encrypted environment variable. Known values are: - "GENERAL" and "SECRET". - "value": "str" # Optional. - The value. If the type is ``SECRET``"" , the value will - be encrypted on first submission. On following - submissions, the encrypted value should be used. - } + "min_instance_count": 0 # Optional. 
The + minimum amount of instances for this component. Must be less than + max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An optional + build command to run while building this component from source. + "cors": { + "allow_credentials": bool, # Optional. + Whether browsers should expose the response to the client-side + JavaScript code when the request"u2019s credentials mode is + include. This configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # Optional. The set of allowed + HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. ], - "git": { - "branch": "str", # Optional. The - name of the branch to use. - "repo_clone_url": "str" # Optional. - The clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "log_destinations": [ - { - "name": "str", # Required. - "datadog": { - "api_key": "str", # - Datadog API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # - Optional. 
Logtail token. - }, - "open_search": { - "basic_auth": { - "password": - "str", # Optional. Password for user defined in - User. Is required when ``endpoint`` is set. - Cannot be set if using a DigitalOcean DBaaS - OpenSearch cluster. - "user": "str" - # Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": - "str", # Optional. The name of a DigitalOcean DBaaS - OpenSearch cluster to use as a log forwarding - destination. Cannot be specified if ``endpoint`` is - also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: - https://:code:``::code:``. Cannot be - specified if ``cluster_name`` is also specified. - "index_name": "logs" - # Optional. Default value is "logs". The index name - to use for the logs. If not set, the default index - name is "logs". - }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } - } + "allow_methods": [ + "str" # Optional. The set of allowed + HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. ], - "routes": [ + "allow_origins": [ { - "path": "str", # Optional. - (Deprecated - Use Ingress Rules instead). An HTTP path - prefix. Paths must start with / and must be unique across - all components within an app. - "preserve_path_prefix": bool - # Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded - to the component. For example, a component with - ``path=/api`` will have requests to ``/api/list`` trimmed - to ``/list``. If this value is ``true``"" , the path will - remain ``/api/list``. + "exact": "str", # Optional. + Exact string match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "prefix": "str", # Optional. + Prefix-based match. 
Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "regex": "str" # Optional. + RE2 style regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For more + information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. } ], - "source_dir": "str" # Optional. An optional - path to the working directory to use for the build. For - Dockerfile builds, this will be used as the build context. Must - be relative to the root of the repo. - } - ], - "ingress": { - "rules": [ + "expose_headers": [ + "str" # Optional. The set of HTTP + response headers that browsers are allowed to access. This + configures the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. An optional + duration specifying how long browsers can cache the results of a + preflight request. This configures the ``Access-Control-Max-Age`` + header. + }, + "dockerfile_path": "str", # Optional. The path to + the Dockerfile relative to the root of the repo. If set, it will be + used to build this component. Otherwise, App Platform will attempt to + build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full list, + please refer to `the product documentation + `_. + "envs": [ { - "component": { - "name": "str", # The name of - the component to route to. Required. - "preserve_path_prefix": - "str", # Optional. An optional flag to preserve the path - that is forwarded to the backend service. By default, the - HTTP request path will be trimmed from the left when - forwarded to the component. For example, a component with - ``path=/api`` will have requests to ``/api/list`` trimmed - to ``/list``. If this value is ``true``"" , the path will - remain ``/api/list``. Note: this is not applicable for - Functions Components and is mutually exclusive with - ``rewrite``. - "rewrite": "str" # Optional. 
- An optional field that will rewrite the path of the - component to be what is specified here. By default, the - HTTP request path will be trimmed from the left when - forwarded to the component. For example, a component with - ``path=/api`` will have requests to ``/api/list`` trimmed - to ``/list``. If you specified the rewrite to be - ``/v1/``"" , requests to ``/api/list`` would be rewritten - to ``/v1/list``. Note: this is mutually exclusive with - ``preserve_path_prefix``. + "key": "str", # The variable name. + Required. + "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. The + value. If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The name of the + branch to use. + "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. 
The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "health_check": { + "failure_threshold": 0, # Optional. The + number of failed health checks before considered unhealthy. + "http_path": "str", # Optional. The route + path used for the HTTP health check ping. If not set, the HTTP + health check will be disabled and a TCP health check used + instead. + "initial_delay_seconds": 0, # Optional. The + number of seconds to wait before beginning health checks. + "period_seconds": 0, # Optional. The number + of seconds to wait between health checks. + "port": 0, # Optional. The port on which the + health check will be performed. If not set, the health check will + be performed on the component's http_port. + "success_threshold": 0, # Optional. The + number of successful health checks before considered healthy. + "timeout_seconds": 0 # Optional. The number + of seconds after which the check times out. + }, + "http_port": 0, # Optional. The internal port on + which this service's run command will listen. Default: 8080 If there + is not an environment variable with the name ``PORT``"" , one will be + automatically added with its value set to the value of this field. + "image": { + "deploy_on_push": { + "enabled": bool # Optional. Whether + to automatically deploy new images. Can only be used for + images hosted in DOCR and can only be used with an image tag, + not a specific digest. + }, + "digest": "str", # Optional. The image + digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The registry + name. Must be left empty for the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. + The credentials to be able to pull the image. The value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * "$username:$access_token" + for registries of type ``GHCR``. 
+ "registry_type": "str", # Optional. * + DOCKER_HUB: The DockerHub container registry type. * DOCR: The + DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", "DOCR", + and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default value is + "latest". The repository tag. Defaults to ``latest`` if not + provided and no digest is provided. Cannot be specified if digest + is provided. + }, + "instance_count": 1, # Optional. Default value is 1. + The amount of instances that this component should be scaled to. + Default: 1. Must not be set if autoscaling is used. + "instance_size_slug": {}, + "internal_ports": [ + 0 # Optional. The ports on which this + service will listen for internal traffic. + ], + "liveness_health_check": { + "failure_threshold": 0, # Optional. The + number of failed health checks before considered unhealthy. + "http_path": "str", # Optional. The route + path used for the HTTP health check ping. If not set, the HTTP + health check will be disabled and a TCP health check used + instead. + "initial_delay_seconds": 0, # Optional. The + number of seconds to wait before beginning health checks. + "period_seconds": 0, # Optional. The number + of seconds to wait between health checks. + "port": 0, # Optional. The port on which the + health check will be performed. + "success_threshold": 0, # Optional. The + number of successful health checks before considered healthy. + "timeout_seconds": 0 # Optional. The number + of seconds after which the check times out. + }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. }, - "cors": { - "allow_credentials": bool, # - Optional. Whether browsers should expose the response to - the client-side JavaScript code when the request"u2019s - credentials mode is include. 
This configures the - ``Access-Control-Allow-Credentials`` header. - "allow_headers": [ - "str" # Optional. - The set of allowed HTTP request headers. This - configures the ``Access-Control-Allow-Headers`` - header. - ], - "allow_methods": [ - "str" # Optional. - The set of allowed HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. - ], - "allow_origins": [ - { - "exact": - "str", # Optional. Exact string match. Only 1 of - ``exact``"" , ``prefix``"" , or ``regex`` must be - set. - "prefix": - "str", # Optional. Prefix-based match. Only 1 of - ``exact``"" , ``prefix``"" , or ``regex`` must be - set. - "regex": - "str" # Optional. RE2 style regex-based match. - Only 1 of ``exact``"" , ``prefix``"" , or - ``regex`` must be set. For more information about - RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. - } - ], - "expose_headers": [ - "str" # Optional. - The set of HTTP response headers that browsers are - allowed to access. This configures the - ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. - An optional duration specifying how long browsers can - cache the results of a preflight request. This configures - the ``Access-Control-Max-Age`` header. + "logtail": { + "token": "str" # Optional. + Logtail token. }, - "match": { - "authority": { - "exact": "str" # - Required. + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # + Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. }, - "path": { - "prefix": "str" # - Prefix-based match. For example, ``/api`` will match - ``/api``"" , ``/api/``"" , and any nested paths such - as ``/api/v1/endpoint``. Required. - } + "cluster_name": "str", # + Optional. 
The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. + Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is + "logs". }, - "redirect": { - "authority": "str", # - Optional. The authority/host to redirect to. This can be - a hostname or IP address. Note: use ``port`` to set the - port. - "port": 0, # Optional. The - port to redirect to. - "redirect_code": 0, # - Optional. The redirect code to use. Defaults to ``302``. - Supported values are 300, 301, 302, 303, 304, 307, 308. - "scheme": "str", # Optional. - The scheme to redirect to. Supported values are ``http`` - or ``https``. Default: ``https``. - "uri": "str" # Optional. An - optional URI path to redirect to. Note: if this is - specified the whole URI of the original request will be - overwritten to this value, irrespective of the original - request URI being matched. + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. } } - ] - }, - "jobs": [ - { - "autoscaling": { - "max_instance_count": 0, # Optional. - The maximum amount of instances for this component. Must be - more than min_instance_count. - "metrics": { - "cpu": { - "percent": 80 # - Optional. Default value is 80. The average target CPU - utilization for the component. - } - }, - "min_instance_count": 0 # Optional. - The minimum amount of instances for this component. Must be - less than max_instance_count. - }, - "bitbucket": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. 
Example: - ``digitalocean/sample-golang``. - }, - "build_command": "str", # Optional. An - optional build command to run while building this component from - source. - "dockerfile_path": "str", # Optional. The - path to the Dockerfile relative to the root of the repo. If set, - it will be used to build this component. Otherwise, App Platform - will attempt to build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full - list, please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable - name. Required. - "scope": - "RUN_AND_BUILD_TIME", # Optional. Default value is - "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at - run-time * BUILD_TIME: Made available only at build-time - * RUN_AND_BUILD_TIME: Made available at both build and - run-time. Known values are: "UNSET", "RUN_TIME", - "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # - Optional. Default value is "GENERAL". * GENERAL: A - plain-text environment variable * SECRET: A secret - encrypted environment variable. Known values are: - "GENERAL" and "SECRET". - "value": "str" # Optional. - The value. If the type is ``SECRET``"" , the value will - be encrypted on first submission. On following - submissions, the encrypted value should be used. - } - ], - "git": { - "branch": "str", # Optional. The - name of the branch to use. - "repo_clone_url": "str" # Optional. - The clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. 
- Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "image": { - "deploy_on_push": { - "enabled": bool # Optional. - Whether to automatically deploy new images. Can only be - used for images hosted in DOCR and can only be used with - an image tag, not a specific digest. - }, - "digest": "str", # Optional. The - image digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The - registry name. Must be left empty for the ``DOCR`` registry - type. - "registry_credentials": "str", # - Optional. The credentials to be able to pull the image. The - value will be encrypted on first submission. On following - submissions, the encrypted value should be used. * - "$username:$access_token" for registries of type - ``DOCKER_HUB``. * "$username:$access_token" for registries of - type ``GHCR``. - "registry_type": "str", # Optional. - * DOCKER_HUB: The DockerHub container registry type. * DOCR: - The DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", - "DOCR", and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default - value is "latest". The repository tag. Defaults to ``latest`` - if not provided and no digest is provided. Cannot be - specified if digest is provided. - }, - "instance_count": 1, # Optional. Default - value is 1. The amount of instances that this component should be - scaled to. Default: 1. Must not be set if autoscaling is used. - "instance_size_slug": {}, - "kind": "UNSPECIFIED", # Optional. Default - value is "UNSPECIFIED". * UNSPECIFIED: Default job type, will - auto-complete to POST_DEPLOY kind. * PRE_DEPLOY: Indicates a job - that runs before an app deployment. * POST_DEPLOY: Indicates a - job that runs after an app deployment. 
* FAILED_DEPLOY: Indicates - a job that runs after a component fails to deploy. Known values - are: "UNSPECIFIED", "PRE_DEPLOY", "POST_DEPLOY", and - "FAILED_DEPLOY". - "log_destinations": [ + ], + "name": "str", # Optional. The name. Must be unique + across all components within the same app. + "protocol": "str", # Optional. The protocol which + the service uses to serve traffic on the http_port. * ``HTTP``"" : + The app is serving the HTTP protocol. Default. * ``HTTP2``"" : The + app is serving the HTTP/2 protocol. Currently, this needs to be + implemented in the service by serving HTTP/2 cleartext (h2c). Known + values are: "HTTP" and "HTTP2". + "routes": [ + { + "path": "str", # Optional. + (Deprecated - Use Ingress Rules instead). An HTTP path + prefix. Paths must start with / and must be unique across all + components within an app. + "preserve_path_prefix": bool # + Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded to + the component. For example, a component with ``path=/api`` + will have requests to ``/api/list`` trimmed to ``/list``. If + this value is ``true``"" , the path will remain + ``/api/list``. + } + ], + "run_command": "str", # Optional. An optional run + command to override the component's default. + "source_dir": "str", # Optional. An optional path to + the working directory to use for the build. For Dockerfile builds, + this will be used as the build context. Must be relative to the root + of the repo. + "termination": { + "drain_seconds": 0, # Optional. The number + of seconds to wait between selecting a container instance for + termination and issuing the TERM signal. Selecting a container + instance for termination begins an asynchronous drain of new + requests on upstream load-balancers. (Default 15). + "grace_period_seconds": 0 # Optional. 
The + number of seconds to wait between sending a TERM signal to a + container and issuing a KILL which causes immediate shutdown. + (Default 120). + } + } + ], + "static_sites": [ + { + "bitbucket": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An optional + build command to run while building this component from source. + "catchall_document": "str", # Optional. The name of + the document to use as the fallback for any requests to documents + that are not found when serving this static site. Only 1 of + ``catchall_document`` or ``error_document`` can be set. + "cors": { + "allow_credentials": bool, # Optional. + Whether browsers should expose the response to the client-side + JavaScript code when the request"u2019s credentials mode is + include. This configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # Optional. The set of allowed + HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # Optional. The set of allowed + HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ { - "name": "str", # Required. - "datadog": { - "api_key": "str", # - Datadog API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # - Optional. Logtail token. - }, - "open_search": { - "basic_auth": { - "password": - "str", # Optional. Password for user defined in - User. Is required when ``endpoint`` is set. - Cannot be set if using a DigitalOcean DBaaS - OpenSearch cluster. - "user": "str" - # Optional. Username to authenticate with. Only - required when ``endpoint`` is set. 
Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": - "str", # Optional. The name of a DigitalOcean DBaaS - OpenSearch cluster to use as a log forwarding - destination. Cannot be specified if ``endpoint`` is - also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: - https://:code:``::code:``. Cannot be - specified if ``cluster_name`` is also specified. - "index_name": "logs" - # Optional. Default value is "logs". The index name - to use for the logs. If not set, the default index - name is "logs". - }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } + "exact": "str", # Optional. + Exact string match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "prefix": "str", # Optional. + Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "regex": "str" # Optional. + RE2 style regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For more + information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. } ], - "name": "str", # Optional. The name. Must be - unique across all components within the same app. - "run_command": "str", # Optional. An - optional run command to override the component's default. - "source_dir": "str", # Optional. An optional - path to the working directory to use for the build. For - Dockerfile builds, this will be used as the build context. Must - be relative to the root of the repo. - "termination": { - "grace_period_seconds": 0 # - Optional. The number of seconds to wait between sending a - TERM signal to a container and issuing a KILL which causes - immediate shutdown. (Default 120). + "expose_headers": [ + "str" # Optional. The set of HTTP + response headers that browsers are allowed to access. This + configures the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. 
An optional + duration specifying how long browsers can cache the results of a + preflight request. This configures the ``Access-Control-Max-Age`` + header. + }, + "dockerfile_path": "str", # Optional. The path to + the Dockerfile relative to the root of the repo. If set, it will be + used to build this component. Otherwise, App Platform will attempt to + build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full list, + please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable name. + Required. + "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. The + value. If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. + } + ], + "error_document": "404.html", # Optional. Default + value is "404.html". The name of the error document to use when + serving this static site. Default: 404.html. If no such file exists + within the built assets, App Platform will supply one. + "git": { + "branch": "str", # Optional. The name of the + branch to use. + "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. 
+ "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # Optional. Whether + to automatically deploy new images. Can only be used for + images hosted in DOCR and can only be used with an image tag, + not a specific digest. + }, + "digest": "str", # Optional. The image + digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The registry + name. Must be left empty for the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. + The credentials to be able to pull the image. The value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * "$username:$access_token" + for registries of type ``GHCR``. + "registry_type": "str", # Optional. * + DOCKER_HUB: The DockerHub container registry type. * DOCR: The + DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", "DOCR", + and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default value is + "latest". The repository tag. Defaults to ``latest`` if not + provided and no digest is provided. Cannot be specified if digest + is provided. + }, + "index_document": "index.html", # Optional. Default + value is "index.html". The name of the index document to use when + serving this static site. Default: index.html. + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. Required. 
+ "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. + Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # + Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # + Optional. The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. + Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is + "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } } + ], + "name": "str", # Optional. The name. Must be unique + across all components within the same app. + "output_dir": "str", # Optional. An optional path to + where the built assets will be located, relative to the build + context. If not set, App Platform will automatically scan for these + directory names: ``_static``"" , ``dist``"" , ``public``"" , + ``build``. + "routes": [ + { + "path": "str", # Optional. + (Deprecated - Use Ingress Rules instead). An HTTP path + prefix. Paths must start with / and must be unique across all + components within an app. + "preserve_path_prefix": bool # + Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded to + the component. 
For example, a component with ``path=/api`` + will have requests to ``/api/list`` trimmed to ``/list``. If + this value is ``true``"" , the path will remain + ``/api/list``. + } + ], + "run_command": "str", # Optional. An optional run + command to override the component's default. + "source_dir": "str" # Optional. An optional path to + the working directory to use for the build. For Dockerfile builds, + this will be used as the build context. Must be relative to the root + of the repo. + } + ], + "vpc": { + "egress_ips": [ + { + "ip": "str" # Optional. The egress ips + associated with the VPC. } ], - "maintenance": { - "archive": bool, # Optional. Indicates whether the - app should be archived. Setting this to true implies that enabled is - set to true. - "enabled": bool, # Optional. Indicates whether - maintenance mode should be enabled for the app. - "offline_page_url": "str" # Optional. A custom - offline page to display when maintenance mode is enabled or the app - is archived. - }, - "region": "str", # Optional. The slug form of the - geographical origin of the app. Default: ``nearest available``. Known - values are: "atl", "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", - "sgp", and "syd". - "services": [ - { - "autoscaling": { - "max_instance_count": 0, # Optional. - The maximum amount of instances for this component. Must be - more than min_instance_count. - "metrics": { - "cpu": { - "percent": 80 # - Optional. Default value is 80. The average target CPU - utilization for the component. - } - }, - "min_instance_count": 0 # Optional. - The minimum amount of instances for this component. Must be - less than max_instance_count. + "id": "str" # Optional. The ID of the VPC. + }, + "workers": [ + { + "autoscaling": { + "max_instance_count": 0, # Optional. The + maximum amount of instances for this component. Must be more than + min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # Optional. + Default value is 80. 
The average target CPU utilization + for the component. + } }, - "bitbucket": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. + "min_instance_count": 0 # Optional. The + minimum amount of instances for this component. Must be less than + max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An optional + build command to run while building this component from source. + "dockerfile_path": "str", # Optional. The path to + the Dockerfile relative to the root of the repo. If set, it will be + used to build this component. Otherwise, App Platform will attempt to + build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full list, + please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable name. + Required. + "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. The + value. 
If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The name of the + branch to use. + "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # Optional. Whether + to automatically deploy new images. Can only be used for + images hosted in DOCR and can only be used with an image tag, + not a specific digest. + }, + "digest": "str", # Optional. The image + digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The registry + name. Must be left empty for the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. + The credentials to be able to pull the image. The value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * "$username:$access_token" + for registries of type ``GHCR``. + "registry_type": "str", # Optional. * + DOCKER_HUB: The DockerHub container registry type. * DOCR: The + DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", "DOCR", + and "GHCR". 
+ "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default value is + "latest". The repository tag. Defaults to ``latest`` if not + provided and no digest is provided. Cannot be specified if digest + is provided. + }, + "instance_count": 1, # Optional. Default value is 1. + The amount of instances that this component should be scaled to. + Default: 1. Must not be set if autoscaling is used. + "instance_size_slug": {}, + "liveness_health_check": { + "failure_threshold": 0, # Optional. The + number of failed health checks before considered unhealthy. + "http_path": "str", # Optional. The route + path used for the HTTP health check ping. If not set, the HTTP + health check will be disabled and a TCP health check used + instead. + "initial_delay_seconds": 0, # Optional. The + number of seconds to wait before beginning health checks. + "period_seconds": 0, # Optional. The number + of seconds to wait between health checks. + "port": 0, # Optional. The port on which the + health check will be performed. + "success_threshold": 0, # Optional. The + number of successful health checks before considered healthy. + "timeout_seconds": 0 # Optional. The number + of seconds after which the check times out. + }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. + Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # + Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # + Optional. 
The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. + Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is + "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. Must be unique + across all components within the same app. + "run_command": "str", # Optional. An optional run + command to override the component's default. + "source_dir": "str", # Optional. An optional path to + the working directory to use for the build. For Dockerfile builds, + this will be used as the build context. Must be relative to the root + of the repo. + "termination": { + "grace_period_seconds": 0 # Optional. The + number of seconds to wait between sending a TERM signal to a + container and issuing a KILL which causes immediate shutdown. + (Default 120). + } + } + ] + } + } + """ + + @distributed_trace + def validate_app_spec(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Propose an App Spec. + + To propose and validate a spec for a new or existing app, send a POST request to the + ``/v2/apps/propose`` endpoint. The request returns some information about the proposed app, + including app cost and upgrade cost. If an existing app ID is specified, the app spec is + treated as a proposed update to the existing app. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "spec": { + "name": "str", # The name of the app. Must be unique across all apps + in the same account. Required. + "databases": [ + { + "name": "str", # The database's name. The name must + be unique across all components within the same app and cannot use + capital letters. Required. + "cluster_name": "str", # Optional. The name of the + underlying DigitalOcean DBaaS cluster. This is required for + production databases. For dev databases, if cluster_name is not set, + a new cluster will be provisioned. + "db_name": "str", # Optional. The name of the MySQL + or PostgreSQL database to configure. + "db_user": "str", # Optional. The name of the MySQL + or PostgreSQL user to configure. + "engine": "UNSET", # Optional. Default value is + "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * MONGODB: + MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey. + Known values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", + "KAFKA", "OPENSEARCH", and "VALKEY". + "production": bool, # Optional. Whether this is a + production or dev database. + "version": "str" # Optional. The version of the + database engine. + } + ], + "disable_edge_cache": False, # Optional. Default value is False. .. + role:: raw-html-m2r(raw) :format: html If set to ``true``"" , the app + will **not** be cached at the edge (CDN). Enable this option if you want to + manage CDN configuration yourself"u2014whether by using an external CDN + provider or by handling static content and caching within your app. This + setting is also recommended for apps that require real-time data or serve + dynamic content, such as those using Server-Sent Events (SSE) over GET, or + hosting an MCP (Model Context Protocol) Server that utilizes SSE."" + :raw-html-m2r:`
` **Note:** This feature is not available for static site + components."" :raw-html-m2r:`
` For more information, see `Disable CDN + Cache + `_. + "disable_email_obfuscation": False, # Optional. Default value is + False. If set to ``true``"" , email addresses in the app will not be + obfuscated. This is useful for apps that require email addresses to be + visible (in the HTML markup). + "domains": [ + { + "domain": "str", # The hostname for the domain. + Required. + "minimum_tls_version": "str", # Optional. The + minimum version of TLS a client application can use to access + resources for the domain. Must be one of the following values + wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values are: + "1.2" and "1.3". + "type": "UNSPECIFIED", # Optional. Default value is + "UNSPECIFIED". * DEFAULT: The default ``.ondigitalocean.app`` domain + assigned to this app * PRIMARY: The primary domain for this app that + is displayed as the default in the control panel, used in bindable + environment variables, and any other places that reference an app's + live URL. Only one domain may be set as primary. * ALIAS: A + non-primary domain. Known values are: "UNSPECIFIED", "DEFAULT", + "PRIMARY", and "ALIAS". + "wildcard": bool, # Optional. Indicates whether the + domain includes all sub-domains, in addition to the given domain. + "zone": "str" # Optional. Optional. If the domain + uses DigitalOcean DNS and you would like App Platform to + automatically manage it for you, set this to the name of the domain + on your account. For example, If the domain you are adding is + ``app.domain.com``"" , the zone could be ``domain.com``. + } + ], + "egress": { + "type": "AUTOASSIGN" # Optional. Default value is + "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and + "DEDICATED_IP". + }, + "enhanced_threat_control_enabled": False, # Optional. Default value + is False. If set to ``true``"" , suspicious requests will go through + additional security checks to help mitigate layer 7 DDoS attacks. + "functions": [ + { + "name": "str", # The name. 
Must be unique across all + components within the same app. Required. + "alerts": [ + { + "disabled": bool, # Optional. Is the + alert disabled?. + "operator": "UNSPECIFIED_OPERATOR", + # Optional. Default value is "UNSPECIFIED_OPERATOR". Known + values are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and + "LESS_THAN". + "rule": "UNSPECIFIED_RULE", # + Optional. Default value is "UNSPECIFIED_RULE". Known values + are: "UNSPECIFIED_RULE", "CPU_UTILIZATION", + "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED", + "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", + "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", + "FUNCTIONS_ACTIVATION_COUNT", + "FUNCTIONS_AVERAGE_DURATION_MS", + "FUNCTIONS_ERROR_RATE_PER_MINUTE", + "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", + and "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, # Optional. Threshold + value for alert. + "window": "UNSPECIFIED_WINDOW" # + Optional. Default value is "UNSPECIFIED_WINDOW". Known values + are: "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", + "THIRTY_MINUTES", and "ONE_HOUR". + } + ], + "bitbucket": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "cors": { + "allow_credentials": bool, # Optional. + Whether browsers should expose the response to the client-side + JavaScript code when the request"u2019s credentials mode is + include. This configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # Optional. The set of allowed + HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # Optional. The set of allowed + HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. 
+ Exact string match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "prefix": "str", # Optional. + Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "regex": "str" # Optional. + RE2 style regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For more + information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. The set of HTTP + response headers that browsers are allowed to access. This + configures the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. An optional + duration specifying how long browsers can cache the results of a + preflight request. This configures the ``Access-Control-Max-Age`` + header. + }, + "envs": [ + { + "key": "str", # The variable name. + Required. + "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. The + value. If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The name of the + branch to use. + "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. 
Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. + Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # + Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # + Optional. The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. + Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is + "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "routes": [ + { + "path": "str", # Optional. + (Deprecated - Use Ingress Rules instead). An HTTP path + prefix. Paths must start with / and must be unique across all + components within an app. + "preserve_path_prefix": bool # + Optional. 
An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded to + the component. For example, a component with ``path=/api`` + will have requests to ``/api/list`` trimmed to ``/list``. If + this value is ``true``"" , the path will remain + ``/api/list``. + } + ], + "source_dir": "str" # Optional. An optional path to + the working directory to use for the build. For Dockerfile builds, + this will be used as the build context. Must be relative to the root + of the repo. + } + ], + "ingress": { + "rules": [ + { + "component": { + "name": "str", # The name of the + component to route to. Required. + "preserve_path_prefix": "str", # + Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded to + the component. For example, a component with ``path=/api`` + will have requests to ``/api/list`` trimmed to ``/list``. If + this value is ``true``"" , the path will remain + ``/api/list``. Note: this is not applicable for Functions + Components and is mutually exclusive with ``rewrite``. + "rewrite": "str" # Optional. An + optional field that will rewrite the path of the component to + be what is specified here. By default, the HTTP request path + will be trimmed from the left when forwarded to the + component. For example, a component with ``path=/api`` will + have requests to ``/api/list`` trimmed to ``/list``. If you + specified the rewrite to be ``/v1/``"" , requests to + ``/api/list`` would be rewritten to ``/v1/list``. Note: this + is mutually exclusive with ``preserve_path_prefix``. }, - "build_command": "str", # Optional. An - optional build command to run while building this component from - source. "cors": { "allow_credentials": bool, # Optional. 
Whether browsers should expose the response to the @@ -108270,980 +110682,1256 @@ def create_rollback( results of a preflight request. This configures the ``Access-Control-Max-Age`` header. }, - "dockerfile_path": "str", # Optional. The - path to the Dockerfile relative to the root of the repo. If set, - it will be used to build this component. Otherwise, App Platform - will attempt to build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full - list, please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable - name. Required. - "scope": - "RUN_AND_BUILD_TIME", # Optional. Default value is - "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at - run-time * BUILD_TIME: Made available only at build-time - * RUN_AND_BUILD_TIME: Made available at both build and - run-time. Known values are: "UNSET", "RUN_TIME", - "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # - Optional. Default value is "GENERAL". * GENERAL: A - plain-text environment variable * SECRET: A secret - encrypted environment variable. Known values are: - "GENERAL" and "SECRET". - "value": "str" # Optional. - The value. If the type is ``SECRET``"" , the value will - be encrypted on first submission. On following - submissions, the encrypted value should be used. + "match": { + "authority": { + "exact": "str" # Required. + }, + "path": { + "prefix": "str" # + Prefix-based match. For example, ``/api`` will match + ``/api``"" , ``/api/``"" , and any nested paths such as + ``/api/v1/endpoint``. Required. } - ], - "git": { - "branch": "str", # Optional. The - name of the branch to use. - "repo_clone_url": "str" # Optional. - The clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. 
- Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. }, - "gitlab": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. + "redirect": { + "authority": "str", # Optional. The + authority/host to redirect to. This can be a hostname or IP + address. Note: use ``port`` to set the port. + "port": 0, # Optional. The port to + redirect to. + "redirect_code": 0, # Optional. The + redirect code to use. Defaults to ``302``. Supported values + are 300, 301, 302, 303, 304, 307, 308. + "scheme": "str", # Optional. The + scheme to redirect to. Supported values are ``http`` or + ``https``. Default: ``https``. + "uri": "str" # Optional. An optional + URI path to redirect to. Note: if this is specified the whole + URI of the original request will be overwritten to this + value, irrespective of the original request URI being + matched. + } + } + ] + }, + "jobs": [ + { + "autoscaling": { + "max_instance_count": 0, # Optional. The + maximum amount of instances for this component. Must be more than + min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # Optional. + Default value is 80. The average target CPU utilization + for the component. + } }, - "health_check": { - "failure_threshold": 0, # Optional. - The number of failed health checks before considered - unhealthy. - "http_path": "str", # Optional. The - route path used for the HTTP health check ping. If not set, - the HTTP health check will be disabled and a TCP health check - used instead. - "initial_delay_seconds": 0, # - Optional. The number of seconds to wait before beginning - health checks. - "period_seconds": 0, # Optional. 
The - number of seconds to wait between health checks. - "port": 0, # Optional. The port on - which the health check will be performed. If not set, the - health check will be performed on the component's http_port. - "success_threshold": 0, # Optional. - The number of successful health checks before considered - healthy. - "timeout_seconds": 0 # Optional. The - number of seconds after which the check times out. + "min_instance_count": 0 # Optional. The + minimum amount of instances for this component. Must be less than + max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An optional + build command to run while building this component from source. + "dockerfile_path": "str", # Optional. The path to + the Dockerfile relative to the root of the repo. If set, it will be + used to build this component. Otherwise, App Platform will attempt to + build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full list, + please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable name. + Required. + "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. 
The + value. If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The name of the + branch to use. + "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # Optional. Whether + to automatically deploy new images. Can only be used for + images hosted in DOCR and can only be used with an image tag, + not a specific digest. }, - "http_port": 0, # Optional. The internal - port on which this service's run command will listen. Default: - 8080 If there is not an environment variable with the name - ``PORT``"" , one will be automatically added with its value set - to the value of this field. - "image": { - "deploy_on_push": { - "enabled": bool # Optional. - Whether to automatically deploy new images. Can only be - used for images hosted in DOCR and can only be used with - an image tag, not a specific digest. + "digest": "str", # Optional. The image + digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The registry + name. Must be left empty for the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. + The credentials to be able to pull the image. 
The value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * "$username:$access_token" + for registries of type ``GHCR``. + "registry_type": "str", # Optional. * + DOCKER_HUB: The DockerHub container registry type. * DOCR: The + DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", "DOCR", + and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default value is + "latest". The repository tag. Defaults to ``latest`` if not + provided and no digest is provided. Cannot be specified if digest + is provided. + }, + "instance_count": 1, # Optional. Default value is 1. + The amount of instances that this component should be scaled to. + Default: 1. Must not be set if autoscaling is used. + "instance_size_slug": {}, + "kind": "UNSPECIFIED", # Optional. Default value is + "UNSPECIFIED". * UNSPECIFIED: Default job type, will auto-complete to + POST_DEPLOY kind. * PRE_DEPLOY: Indicates a job that runs before an + app deployment. * POST_DEPLOY: Indicates a job that runs after an app + deployment. * FAILED_DEPLOY: Indicates a job that runs after a + component fails to deploy. Known values are: "UNSPECIFIED", + "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY". + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. }, - "digest": "str", # Optional. The - image digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The - registry name. Must be left empty for the ``DOCR`` registry - type. - "registry_credentials": "str", # - Optional. The credentials to be able to pull the image. The - value will be encrypted on first submission. On following - submissions, the encrypted value should be used. 
* - "$username:$access_token" for registries of type - ``DOCKER_HUB``. * "$username:$access_token" for registries of - type ``GHCR``. - "registry_type": "str", # Optional. - * DOCKER_HUB: The DockerHub container registry type. * DOCR: - The DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", - "DOCR", and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default - value is "latest". The repository tag. Defaults to ``latest`` - if not provided and no digest is provided. Cannot be - specified if digest is provided. - }, - "instance_count": 1, # Optional. Default - value is 1. The amount of instances that this component should be - scaled to. Default: 1. Must not be set if autoscaling is used. - "instance_size_slug": {}, - "internal_ports": [ - 0 # Optional. The ports on which - this service will listen for internal traffic. - ], - "liveness_health_check": { - "failure_threshold": 0, # Optional. - The number of failed health checks before considered - unhealthy. - "http_path": "str", # Optional. The - route path used for the HTTP health check ping. If not set, - the HTTP health check will be disabled and a TCP health check - used instead. - "initial_delay_seconds": 0, # - Optional. The number of seconds to wait before beginning - health checks. - "period_seconds": 0, # Optional. The - number of seconds to wait between health checks. - "port": 0, # Optional. The port on - which the health check will be performed. - "success_threshold": 0, # Optional. - The number of successful health checks before considered - healthy. - "timeout_seconds": 0 # Optional. The - number of seconds after which the check times out. - }, - "log_destinations": [ - { - "name": "str", # Required. - "datadog": { - "api_key": "str", # - Datadog API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # - Optional. 
Logtail token. - }, - "open_search": { - "basic_auth": { - "password": - "str", # Optional. Password for user defined in - User. Is required when ``endpoint`` is set. - Cannot be set if using a DigitalOcean DBaaS - OpenSearch cluster. - "user": "str" - # Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": - "str", # Optional. The name of a DigitalOcean DBaaS - OpenSearch cluster to use as a log forwarding - destination. Cannot be specified if ``endpoint`` is - also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: - https://:code:``::code:``. Cannot be - specified if ``cluster_name`` is also specified. - "index_name": "logs" - # Optional. Default value is "logs". The index name - to use for the logs. If not set, the default index - name is "logs". + "logtail": { + "token": "str" # Optional. + Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # + Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } - } - ], - "name": "str", # Optional. The name. Must be - unique across all components within the same app. - "protocol": "str", # Optional. The protocol - which the service uses to serve traffic on the http_port. * - ``HTTP``"" : The app is serving the HTTP protocol. Default. * - ``HTTP2``"" : The app is serving the HTTP/2 protocol. Currently, - this needs to be implemented in the service by serving HTTP/2 - cleartext (h2c). Known values are: "HTTP" and "HTTP2". - "routes": [ - { - "path": "str", # Optional. 
- (Deprecated - Use Ingress Rules instead). An HTTP path - prefix. Paths must start with / and must be unique across - all components within an app. - "preserve_path_prefix": bool - # Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded - to the component. For example, a component with - ``path=/api`` will have requests to ``/api/list`` trimmed - to ``/list``. If this value is ``true``"" , the path will - remain ``/api/list``. + "cluster_name": "str", # + Optional. The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. + Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is + "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. } - ], - "run_command": "str", # Optional. An - optional run command to override the component's default. - "source_dir": "str", # Optional. An optional - path to the working directory to use for the build. For - Dockerfile builds, this will be used as the build context. Must - be relative to the root of the repo. - "termination": { - "drain_seconds": 0, # Optional. The - number of seconds to wait between selecting a container - instance for termination and issuing the TERM signal. - Selecting a container instance for termination begins an - asynchronous drain of new requests on upstream - load-balancers. (Default 15). - "grace_period_seconds": 0 # - Optional. The number of seconds to wait between sending a - TERM signal to a container and issuing a KILL which causes - immediate shutdown. (Default 120). 
} + ], + "name": "str", # Optional. The name. Must be unique + across all components within the same app. + "run_command": "str", # Optional. An optional run + command to override the component's default. + "source_dir": "str", # Optional. An optional path to + the working directory to use for the build. For Dockerfile builds, + this will be used as the build context. Must be relative to the root + of the repo. + "termination": { + "grace_period_seconds": 0 # Optional. The + number of seconds to wait between sending a TERM signal to a + container and issuing a KILL which causes immediate shutdown. + (Default 120). } - ], - "static_sites": [ - { - "bitbucket": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "build_command": "str", # Optional. An - optional build command to run while building this component from - source. - "catchall_document": "str", # Optional. The - name of the document to use as the fallback for any requests to - documents that are not found when serving this static site. Only - 1 of ``catchall_document`` or ``error_document`` can be set. - "cors": { - "allow_credentials": bool, # - Optional. Whether browsers should expose the response to the - client-side JavaScript code when the request"u2019s - credentials mode is include. This configures the - ``Access-Control-Allow-Credentials`` header. - "allow_headers": [ - "str" # Optional. The set of - allowed HTTP request headers. This configures the - ``Access-Control-Allow-Headers`` header. - ], - "allow_methods": [ - "str" # Optional. The set of - allowed HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. - ], - "allow_origins": [ - { - "exact": "str", # - Optional. Exact string match. 
Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. - "prefix": "str", # - Optional. Prefix-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. - "regex": "str" # - Optional. RE2 style regex-based match. Only 1 of - ``exact``"" , ``prefix``"" , or ``regex`` must be - set. For more information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. - } - ], - "expose_headers": [ - "str" # Optional. The set of - HTTP response headers that browsers are allowed to - access. This configures the - ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. An - optional duration specifying how long browsers can cache the - results of a preflight request. This configures the - ``Access-Control-Max-Age`` header. - }, - "dockerfile_path": "str", # Optional. The - path to the Dockerfile relative to the root of the repo. If set, - it will be used to build this component. Otherwise, App Platform - will attempt to build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full - list, please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable - name. Required. - "scope": - "RUN_AND_BUILD_TIME", # Optional. Default value is - "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at - run-time * BUILD_TIME: Made available only at build-time - * RUN_AND_BUILD_TIME: Made available at both build and - run-time. Known values are: "UNSET", "RUN_TIME", - "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # - Optional. Default value is "GENERAL". * GENERAL: A - plain-text environment variable * SECRET: A secret - encrypted environment variable. Known values are: - "GENERAL" and "SECRET". - "value": "str" # Optional. - The value. If the type is ``SECRET``"" , the value will - be encrypted on first submission. On following - submissions, the encrypted value should be used. 
+ } + ], + "maintenance": { + "archive": bool, # Optional. Indicates whether the app + should be archived. Setting this to true implies that enabled is set to + true. + "enabled": bool, # Optional. Indicates whether maintenance + mode should be enabled for the app. + "offline_page_url": "str" # Optional. A custom offline page + to display when maintenance mode is enabled or the app is archived. + }, + "region": "str", # Optional. The slug form of the geographical + origin of the app. Default: ``nearest available``. Known values are: "atl", + "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", "sgp", and "syd". + "services": [ + { + "autoscaling": { + "max_instance_count": 0, # Optional. The + maximum amount of instances for this component. Must be more than + min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # Optional. + Default value is 80. The average target CPU utilization + for the component. } - ], - "error_document": "404.html", # Optional. - Default value is "404.html". The name of the error document to - use when serving this static site. Default: 404.html. If no such - file exists within the built assets, App Platform will supply - one. - "git": { - "branch": "str", # Optional. The - name of the branch to use. - "repo_clone_url": "str" # Optional. - The clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. 
- }, - "image": { - "deploy_on_push": { - "enabled": bool # Optional. - Whether to automatically deploy new images. Can only be - used for images hosted in DOCR and can only be used with - an image tag, not a specific digest. - }, - "digest": "str", # Optional. The - image digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The - registry name. Must be left empty for the ``DOCR`` registry - type. - "registry_credentials": "str", # - Optional. The credentials to be able to pull the image. The - value will be encrypted on first submission. On following - submissions, the encrypted value should be used. * - "$username:$access_token" for registries of type - ``DOCKER_HUB``. * "$username:$access_token" for registries of - type ``GHCR``. - "registry_type": "str", # Optional. - * DOCKER_HUB: The DockerHub container registry type. * DOCR: - The DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", - "DOCR", and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default - value is "latest". The repository tag. Defaults to ``latest`` - if not provided and no digest is provided. Cannot be - specified if digest is provided. }, - "index_document": "index.html", # Optional. - Default value is "index.html". The name of the index document to - use when serving this static site. Default: index.html. - "log_destinations": [ - { - "name": "str", # Required. - "datadog": { - "api_key": "str", # - Datadog API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # - Optional. Logtail token. - }, - "open_search": { - "basic_auth": { - "password": - "str", # Optional. Password for user defined in - User. Is required when ``endpoint`` is set. - Cannot be set if using a DigitalOcean DBaaS - OpenSearch cluster. - "user": "str" - # Optional. Username to authenticate with. 
Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": - "str", # Optional. The name of a DigitalOcean DBaaS - OpenSearch cluster to use as a log forwarding - destination. Cannot be specified if ``endpoint`` is - also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: - https://:code:``::code:``. Cannot be - specified if ``cluster_name`` is also specified. - "index_name": "logs" - # Optional. Default value is "logs". The index name - to use for the logs. If not set, the default index - name is "logs". - }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } - } + "min_instance_count": 0 # Optional. The + minimum amount of instances for this component. Must be less than + max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An optional + build command to run while building this component from source. + "cors": { + "allow_credentials": bool, # Optional. + Whether browsers should expose the response to the client-side + JavaScript code when the request"u2019s credentials mode is + include. This configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # Optional. The set of allowed + HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. ], - "name": "str", # Optional. The name. Must be - unique across all components within the same app. - "output_dir": "str", # Optional. An optional - path to where the built assets will be located, relative to the - build context. 
If not set, App Platform will automatically scan - for these directory names: ``_static``"" , ``dist``"" , - ``public``"" , ``build``. - "routes": [ + "allow_methods": [ + "str" # Optional. The set of allowed + HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ { - "path": "str", # Optional. - (Deprecated - Use Ingress Rules instead). An HTTP path - prefix. Paths must start with / and must be unique across - all components within an app. - "preserve_path_prefix": bool - # Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded - to the component. For example, a component with - ``path=/api`` will have requests to ``/api/list`` trimmed - to ``/list``. If this value is ``true``"" , the path will - remain ``/api/list``. + "exact": "str", # Optional. + Exact string match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "prefix": "str", # Optional. + Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "regex": "str" # Optional. + RE2 style regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For more + information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. } ], - "run_command": "str", # Optional. An - optional run command to override the component's default. - "source_dir": "str" # Optional. An optional - path to the working directory to use for the build. For - Dockerfile builds, this will be used as the build context. Must - be relative to the root of the repo. - } - ], - "vpc": { - "egress_ips": [ + "expose_headers": [ + "str" # Optional. The set of HTTP + response headers that browsers are allowed to access. This + configures the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. 
An optional + duration specifying how long browsers can cache the results of a + preflight request. This configures the ``Access-Control-Max-Age`` + header. + }, + "dockerfile_path": "str", # Optional. The path to + the Dockerfile relative to the root of the repo. If set, it will be + used to build this component. Otherwise, App Platform will attempt to + build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full list, + please refer to `the product documentation + `_. + "envs": [ { - "ip": "str" # Optional. The egress - ips associated with the VPC. + "key": "str", # The variable name. + Required. + "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. The + value. If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. } ], - "id": "str" # Optional. The ID of the VPC. - }, - "workers": [ - { - "autoscaling": { - "max_instance_count": 0, # Optional. - The maximum amount of instances for this component. Must be - more than min_instance_count. - "metrics": { - "cpu": { - "percent": 80 # - Optional. Default value is 80. The average target CPU - utilization for the component. - } - }, - "min_instance_count": 0 # Optional. - The minimum amount of instances for this component. Must be - less than max_instance_count. - }, - "bitbucket": { - "branch": "str", # Optional. The - name of the branch to use. 
- "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "build_command": "str", # Optional. An - optional build command to run while building this component from - source. - "dockerfile_path": "str", # Optional. The - path to the Dockerfile relative to the root of the repo. If set, - it will be used to build this component. Otherwise, App Platform - will attempt to build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full - list, please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable - name. Required. - "scope": - "RUN_AND_BUILD_TIME", # Optional. Default value is - "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at - run-time * BUILD_TIME: Made available only at build-time - * RUN_AND_BUILD_TIME: Made available at both build and - run-time. Known values are: "UNSET", "RUN_TIME", - "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # - Optional. Default value is "GENERAL". * GENERAL: A - plain-text environment variable * SECRET: A secret - encrypted environment variable. Known values are: - "GENERAL" and "SECRET". - "value": "str" # Optional. - The value. If the type is ``SECRET``"" , the value will - be encrypted on first submission. On following - submissions, the encrypted value should be used. - } - ], - "git": { - "branch": "str", # Optional. The - name of the branch to use. - "repo_clone_url": "str" # Optional. - The clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. 
The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. + "git": { + "branch": "str", # Optional. The name of the + branch to use. + "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "health_check": { + "failure_threshold": 0, # Optional. The + number of failed health checks before considered unhealthy. + "http_path": "str", # Optional. The route + path used for the HTTP health check ping. If not set, the HTTP + health check will be disabled and a TCP health check used + instead. + "initial_delay_seconds": 0, # Optional. The + number of seconds to wait before beginning health checks. + "period_seconds": 0, # Optional. The number + of seconds to wait between health checks. + "port": 0, # Optional. The port on which the + health check will be performed. If not set, the health check will + be performed on the component's http_port. + "success_threshold": 0, # Optional. The + number of successful health checks before considered healthy. 
+ "timeout_seconds": 0 # Optional. The number + of seconds after which the check times out. + }, + "http_port": 0, # Optional. The internal port on + which this service's run command will listen. Default: 8080 If there + is not an environment variable with the name ``PORT``"" , one will be + automatically added with its value set to the value of this field. + "image": { + "deploy_on_push": { + "enabled": bool # Optional. Whether + to automatically deploy new images. Can only be used for + images hosted in DOCR and can only be used with an image tag, + not a specific digest. }, - "image": { - "deploy_on_push": { - "enabled": bool # Optional. - Whether to automatically deploy new images. Can only be - used for images hosted in DOCR and can only be used with - an image tag, not a specific digest. + "digest": "str", # Optional. The image + digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The registry + name. Must be left empty for the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. + The credentials to be able to pull the image. The value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * "$username:$access_token" + for registries of type ``GHCR``. + "registry_type": "str", # Optional. * + DOCKER_HUB: The DockerHub container registry type. * DOCR: The + DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", "DOCR", + and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default value is + "latest". The repository tag. Defaults to ``latest`` if not + provided and no digest is provided. Cannot be specified if digest + is provided. + }, + "instance_count": 1, # Optional. Default value is 1. + The amount of instances that this component should be scaled to. + Default: 1. 
Must not be set if autoscaling is used. + "instance_size_slug": {}, + "internal_ports": [ + 0 # Optional. The ports on which this + service will listen for internal traffic. + ], + "liveness_health_check": { + "failure_threshold": 0, # Optional. The + number of failed health checks before considered unhealthy. + "http_path": "str", # Optional. The route + path used for the HTTP health check ping. If not set, the HTTP + health check will be disabled and a TCP health check used + instead. + "initial_delay_seconds": 0, # Optional. The + number of seconds to wait before beginning health checks. + "period_seconds": 0, # Optional. The number + of seconds to wait between health checks. + "port": 0, # Optional. The port on which the + health check will be performed. + "success_threshold": 0, # Optional. The + number of successful health checks before considered healthy. + "timeout_seconds": 0 # Optional. The number + of seconds after which the check times out. + }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. }, - "digest": "str", # Optional. The - image digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The - registry name. Must be left empty for the ``DOCR`` registry - type. - "registry_credentials": "str", # - Optional. The credentials to be able to pull the image. The - value will be encrypted on first submission. On following - submissions, the encrypted value should be used. * - "$username:$access_token" for registries of type - ``DOCKER_HUB``. * "$username:$access_token" for registries of - type ``GHCR``. - "registry_type": "str", # Optional. - * DOCKER_HUB: The DockerHub container registry type. * DOCR: - The DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", - "DOCR", and "GHCR". - "repository": "str", # Optional. 
The - repository name. - "tag": "latest" # Optional. Default - value is "latest". The repository tag. Defaults to ``latest`` - if not provided and no digest is provided. Cannot be - specified if digest is provided. - }, - "instance_count": 1, # Optional. Default - value is 1. The amount of instances that this component should be - scaled to. Default: 1. Must not be set if autoscaling is used. - "instance_size_slug": {}, - "liveness_health_check": { - "failure_threshold": 0, # Optional. - The number of failed health checks before considered - unhealthy. - "http_path": "str", # Optional. The - route path used for the HTTP health check ping. If not set, - the HTTP health check will be disabled and a TCP health check - used instead. - "initial_delay_seconds": 0, # - Optional. The number of seconds to wait before beginning - health checks. - "period_seconds": 0, # Optional. The - number of seconds to wait between health checks. - "port": 0, # Optional. The port on - which the health check will be performed. - "success_threshold": 0, # Optional. - The number of successful health checks before considered - healthy. - "timeout_seconds": 0 # Optional. The - number of seconds after which the check times out. - }, - "log_destinations": [ - { - "name": "str", # Required. - "datadog": { - "api_key": "str", # - Datadog API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # - Optional. Logtail token. - }, - "open_search": { - "basic_auth": { - "password": - "str", # Optional. Password for user defined in - User. Is required when ``endpoint`` is set. - Cannot be set if using a DigitalOcean DBaaS - OpenSearch cluster. - "user": "str" - # Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": - "str", # Optional. The name of a DigitalOcean DBaaS - OpenSearch cluster to use as a log forwarding - destination. 
Cannot be specified if ``endpoint`` is - also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: - https://:code:``::code:``. Cannot be - specified if ``cluster_name`` is also specified. - "index_name": "logs" - # Optional. Default value is "logs". The index name - to use for the logs. If not set, the default index - name is "logs". + "logtail": { + "token": "str" # Optional. + Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # + Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } + "cluster_name": "str", # + Optional. The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. + Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is + "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. } - ], - "name": "str", # Optional. The name. Must be - unique across all components within the same app. - "run_command": "str", # Optional. An - optional run command to override the component's default. - "source_dir": "str", # Optional. An optional - path to the working directory to use for the build. For - Dockerfile builds, this will be used as the build context. Must - be relative to the root of the repo. - "termination": { - "grace_period_seconds": 0 # - Optional. 
The number of seconds to wait between sending a - TERM signal to a container and issuing a KILL which causes - immediate shutdown. (Default 120). } + ], + "name": "str", # Optional. The name. Must be unique + across all components within the same app. + "protocol": "str", # Optional. The protocol which + the service uses to serve traffic on the http_port. * ``HTTP``"" : + The app is serving the HTTP protocol. Default. * ``HTTP2``"" : The + app is serving the HTTP/2 protocol. Currently, this needs to be + implemented in the service by serving HTTP/2 cleartext (h2c). Known + values are: "HTTP" and "HTTP2". + "routes": [ + { + "path": "str", # Optional. + (Deprecated - Use Ingress Rules instead). An HTTP path + prefix. Paths must start with / and must be unique across all + components within an app. + "preserve_path_prefix": bool # + Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded to + the component. For example, a component with ``path=/api`` + will have requests to ``/api/list`` trimmed to ``/list``. If + this value is ``true``"" , the path will remain + ``/api/list``. + } + ], + "run_command": "str", # Optional. An optional run + command to override the component's default. + "source_dir": "str", # Optional. An optional path to + the working directory to use for the build. For Dockerfile builds, + this will be used as the build context. Must be relative to the root + of the repo. + "termination": { + "drain_seconds": 0, # Optional. The number + of seconds to wait between selecting a container instance for + termination and issuing the TERM signal. Selecting a container + instance for termination begins an asynchronous drain of new + requests on upstream load-balancers. (Default 15). + "grace_period_seconds": 0 # Optional. 
The + number of seconds to wait between sending a TERM signal to a + container and issuing a KILL which causes immediate shutdown. + (Default 120). } - ] - }, + } + ], "static_sites": [ { - "name": "str", # Optional. The name of this static - site. - "source_commit_hash": "str" # Optional. The commit - hash of the repository that was used to build this static site. + "bitbucket": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An optional + build command to run while building this component from source. + "catchall_document": "str", # Optional. The name of + the document to use as the fallback for any requests to documents + that are not found when serving this static site. Only 1 of + ``catchall_document`` or ``error_document`` can be set. + "cors": { + "allow_credentials": bool, # Optional. + Whether browsers should expose the response to the client-side + JavaScript code when the request"u2019s credentials mode is + include. This configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # Optional. The set of allowed + HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # Optional. The set of allowed + HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": "str", # Optional. + Exact string match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "prefix": "str", # Optional. + Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "regex": "str" # Optional. + RE2 style regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. 
For more + information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. The set of HTTP + response headers that browsers are allowed to access. This + configures the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. An optional + duration specifying how long browsers can cache the results of a + preflight request. This configures the ``Access-Control-Max-Age`` + header. + }, + "dockerfile_path": "str", # Optional. The path to + the Dockerfile relative to the root of the repo. If set, it will be + used to build this component. Otherwise, App Platform will attempt to + build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full list, + please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable name. + Required. + "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. The + value. If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. + } + ], + "error_document": "404.html", # Optional. Default + value is "404.html". The name of the error document to use when + serving this static site. Default: 404.html. If no such file exists + within the built assets, App Platform will supply one. + "git": { + "branch": "str", # Optional. The name of the + branch to use. 
+ "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # Optional. Whether + to automatically deploy new images. Can only be used for + images hosted in DOCR and can only be used with an image tag, + not a specific digest. + }, + "digest": "str", # Optional. The image + digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The registry + name. Must be left empty for the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. + The credentials to be able to pull the image. The value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * "$username:$access_token" + for registries of type ``GHCR``. + "registry_type": "str", # Optional. * + DOCKER_HUB: The DockerHub container registry type. * DOCR: The + DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", "DOCR", + and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default value is + "latest". The repository tag. Defaults to ``latest`` if not + provided and no digest is provided. 
Cannot be specified if digest + is provided. + }, + "index_document": "index.html", # Optional. Default + value is "index.html". The name of the index document to use when + serving this static site. Default: index.html. + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. + Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # + Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # + Optional. The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. + Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is + "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. Must be unique + across all components within the same app. + "output_dir": "str", # Optional. An optional path to + where the built assets will be located, relative to the build + context. If not set, App Platform will automatically scan for these + directory names: ``_static``"" , ``dist``"" , ``public``"" , + ``build``. + "routes": [ + { + "path": "str", # Optional. + (Deprecated - Use Ingress Rules instead). An HTTP path + prefix. 
Paths must start with / and must be unique across all + components within an app. + "preserve_path_prefix": bool # + Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded to + the component. For example, a component with ``path=/api`` + will have requests to ``/api/list`` trimmed to ``/list``. If + this value is ``true``"" , the path will remain + ``/api/list``. + } + ], + "run_command": "str", # Optional. An optional run + command to override the component's default. + "source_dir": "str" # Optional. An optional path to + the working directory to use for the build. For Dockerfile builds, + this will be used as the build context. Must be relative to the root + of the repo. } ], - "tier_slug": "str", # Optional. The current pricing tier slug of the - deployment. - "updated_at": "2020-02-20 00:00:00", # Optional. When the deployment - was last updated. + "vpc": { + "egress_ips": [ + { + "ip": "str" # Optional. The egress ips + associated with the VPC. + } + ], + "id": "str" # Optional. The ID of the VPC. + }, "workers": [ { - "name": "str", # Optional. The name of this worker. - "source_commit_hash": "str" # Optional. The commit - hash of the repository that was used to build this worker. + "autoscaling": { + "max_instance_count": 0, # Optional. The + maximum amount of instances for this component. Must be more than + min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # Optional. + Default value is 80. The average target CPU utilization + for the component. + } + }, + "min_instance_count": 0 # Optional. The + minimum amount of instances for this component. Must be less than + max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. 
The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An optional + build command to run while building this component from source. + "dockerfile_path": "str", # Optional. The path to + the Dockerfile relative to the root of the repo. If set, it will be + used to build this component. Otherwise, App Platform will attempt to + build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full list, + please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable name. + Required. + "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. The + value. If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The name of the + branch to use. + "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. 
+ "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # Optional. Whether + to automatically deploy new images. Can only be used for + images hosted in DOCR and can only be used with an image tag, + not a specific digest. + }, + "digest": "str", # Optional. The image + digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The registry + name. Must be left empty for the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. + The credentials to be able to pull the image. The value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * "$username:$access_token" + for registries of type ``GHCR``. + "registry_type": "str", # Optional. * + DOCKER_HUB: The DockerHub container registry type. * DOCR: The + DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", "DOCR", + and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default value is + "latest". The repository tag. Defaults to ``latest`` if not + provided and no digest is provided. Cannot be specified if digest + is provided. + }, + "instance_count": 1, # Optional. Default value is 1. + The amount of instances that this component should be scaled to. + Default: 1. Must not be set if autoscaling is used. + "instance_size_slug": {}, + "liveness_health_check": { + "failure_threshold": 0, # Optional. The + number of failed health checks before considered unhealthy. + "http_path": "str", # Optional. The route + path used for the HTTP health check ping. 
If not set, the HTTP + health check will be disabled and a TCP health check used + instead. + "initial_delay_seconds": 0, # Optional. The + number of seconds to wait before beginning health checks. + "period_seconds": 0, # Optional. The number + of seconds to wait between health checks. + "port": 0, # Optional. The port on which the + health check will be performed. + "success_threshold": 0, # Optional. The + number of successful health checks before considered healthy. + "timeout_seconds": 0 # Optional. The number + of seconds after which the check times out. + }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. + Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # + Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # + Optional. The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. + Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is + "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. Must be unique + across all components within the same app. + "run_command": "str", # Optional. 
An optional run + command to override the component's default. + "source_dir": "str", # Optional. An optional path to + the working directory to use for the build. For Dockerfile builds, + this will be used as the build context. Must be relative to the root + of the repo. + "termination": { + "grace_period_seconds": 0 # Optional. The + number of seconds to wait between sending a TERM signal to a + container and issuing a KILL which causes immediate shutdown. + (Default 120). + } } ] - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. + }, + "app_id": "str" # Optional. An optional ID of an existing app. If set, the + spec will be treated as a proposed update to the specified app. The existing app + is not modified using this method. } - """ - - @overload - def create_rollback( - self, - app_id: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Rollback App. - - Rollback an app to a previous deployment. A new deployment will be created to perform the - rollback. - The app will be pinned to the rollback deployment preventing any new deployments from being - created, - either manually or through Auto Deploy on Push webhooks. To resume deployments, the rollback - must be - either committed or reverted. - - It is recommended to use the Validate App Rollback endpoint to double check if the rollback is - valid and if there are any warnings. 
- - :param app_id: The app ID. Required. - :type app_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python # response body for status code(s): 200 response == { - "deployment": { - "cause": "str", # Optional. What caused this deployment to be - created. - "cloned_from": "str", # Optional. The ID of a previous deployment - that this deployment was cloned from. - "created_at": "2020-02-20 00:00:00", # Optional. The creation time - of the deployment. - "functions": [ + "app_cost": 0, # Optional. The monthly cost of the proposed app in USD. + "app_is_static": bool, # Optional. Indicates whether the app is a static + app. + "app_name_available": bool, # Optional. Indicates whether the app name is + available. + "app_name_suggestion": "str", # Optional. The suggested name if the proposed + app name is unavailable. + "app_tier_downgrade_cost": 0, # Optional. The monthly cost of the proposed + app in USD using the previous pricing plan tier. For example, if you propose an + app that uses the Professional tier, the ``app_tier_downgrade_cost`` field + displays the monthly cost of the app if it were to use the Basic tier. If the + proposed app already uses the lest expensive tier, the field is empty. + "existing_static_apps": "str", # Optional. The maximum number of free static + apps the account can have. We will charge you for any additional static apps. + "spec": { + "name": "str", # The name of the app. Must be unique across all apps + in the same account. Required. + "databases": [ { - "name": "str", # Optional. The name of this - functions component. - "namespace": "str", # Optional. The namespace where - the functions are deployed. - "source_commit_hash": "str" # Optional. 
The commit - hash of the repository that was used to build this functions - component. + "name": "str", # The database's name. The name must + be unique across all components within the same app and cannot use + capital letters. Required. + "cluster_name": "str", # Optional. The name of the + underlying DigitalOcean DBaaS cluster. This is required for + production databases. For dev databases, if cluster_name is not set, + a new cluster will be provisioned. + "db_name": "str", # Optional. The name of the MySQL + or PostgreSQL database to configure. + "db_user": "str", # Optional. The name of the MySQL + or PostgreSQL user to configure. + "engine": "UNSET", # Optional. Default value is + "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * MONGODB: + MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * VALKEY: ValKey. + Known values are: "UNSET", "MYSQL", "PG", "REDIS", "MONGODB", + "KAFKA", "OPENSEARCH", and "VALKEY". + "production": bool, # Optional. Whether this is a + production or dev database. + "version": "str" # Optional. The version of the + database engine. } ], - "id": "str", # Optional. The ID of the deployment. - "jobs": [ + "disable_edge_cache": False, # Optional. Default value is False. .. + role:: raw-html-m2r(raw) :format: html If set to ``true``"" , the app + will **not** be cached at the edge (CDN). Enable this option if you want to + manage CDN configuration yourself"u2014whether by using an external CDN + provider or by handling static content and caching within your app. This + setting is also recommended for apps that require real-time data or serve + dynamic content, such as those using Server-Sent Events (SSE) over GET, or + hosting an MCP (Model Context Protocol) Server that utilizes SSE."" + :raw-html-m2r:`
` **Note:** This feature is not available for static site + components."" :raw-html-m2r:`
` For more information, see `Disable CDN + Cache + `_. + "disable_email_obfuscation": False, # Optional. Default value is + False. If set to ``true``"" , email addresses in the app will not be + obfuscated. This is useful for apps that require email addresses to be + visible (in the HTML markup). + "domains": [ { - "name": "str", # Optional. The name of this job. - "source_commit_hash": "str" # Optional. The commit - hash of the repository that was used to build this job. + "domain": "str", # The hostname for the domain. + Required. + "minimum_tls_version": "str", # Optional. The + minimum version of TLS a client application can use to access + resources for the domain. Must be one of the following values + wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values are: + "1.2" and "1.3". + "type": "UNSPECIFIED", # Optional. Default value is + "UNSPECIFIED". * DEFAULT: The default ``.ondigitalocean.app`` domain + assigned to this app * PRIMARY: The primary domain for this app that + is displayed as the default in the control panel, used in bindable + environment variables, and any other places that reference an app's + live URL. Only one domain may be set as primary. * ALIAS: A + non-primary domain. Known values are: "UNSPECIFIED", "DEFAULT", + "PRIMARY", and "ALIAS". + "wildcard": bool, # Optional. Indicates whether the + domain includes all sub-domains, in addition to the given domain. + "zone": "str" # Optional. Optional. If the domain + uses DigitalOcean DNS and you would like App Platform to + automatically manage it for you, set this to the name of the domain + on your account. For example, If the domain you are adding is + ``app.domain.com``"" , the zone could be ``domain.com``. } ], - "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". Known - values are: "UNKNOWN", "PENDING_BUILD", "BUILDING", "PENDING_DEPLOY", - "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and "CANCELED". - "phase_last_updated_at": "2020-02-20 00:00:00", # Optional. 
When the - deployment phase was last updated. - "progress": { - "error_steps": 0, # Optional. Number of unsuccessful steps. - "pending_steps": 0, # Optional. Number of pending steps. - "running_steps": 0, # Optional. Number of currently running - steps. - "steps": [ - { - "component_name": "str", # Optional. The - component name that this step is associated with. - "ended_at": "2020-02-20 00:00:00", # - Optional. The end time of this step. - "message_base": "str", # Optional. The base - of a human-readable description of the step intended to be - combined with the component name for presentation. For example: - ``message_base`` = "Building service" ``component_name`` = "api". - "name": "str", # Optional. The name of this - step. - "reason": { - "code": "str", # Optional. The error - code. - "message": "str" # Optional. The - error message. - }, - "started_at": "2020-02-20 00:00:00", # - Optional. The start time of this step. - "status": "UNKNOWN", # Optional. Default - value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING", - "RUNNING", "ERROR", and "SUCCESS". - "steps": [ - {} # Optional. Child steps of this - step. - ] - } - ], - "success_steps": 0, # Optional. Number of successful steps. - "summary_steps": [ - { - "component_name": "str", # Optional. The - component name that this step is associated with. - "ended_at": "2020-02-20 00:00:00", # - Optional. The end time of this step. - "message_base": "str", # Optional. The base - of a human-readable description of the step intended to be - combined with the component name for presentation. For example: - ``message_base`` = "Building service" ``component_name`` = "api". - "name": "str", # Optional. The name of this - step. - "reason": { - "code": "str", # Optional. The error - code. - "message": "str" # Optional. The - error message. - }, - "started_at": "2020-02-20 00:00:00", # - Optional. The start time of this step. - "status": "UNKNOWN", # Optional. Default - value is "UNKNOWN". 
Known values are: "UNKNOWN", "PENDING", - "RUNNING", "ERROR", and "SUCCESS". - "steps": [ - {} # Optional. Child steps of this - step. - ] - } - ], - "total_steps": 0 # Optional. Total number of steps. + "egress": { + "type": "AUTOASSIGN" # Optional. Default value is + "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and + "DEDICATED_IP". }, - "services": [ + "enhanced_threat_control_enabled": False, # Optional. Default value + is False. If set to ``true``"" , suspicious requests will go through + additional security checks to help mitigate layer 7 DDoS attacks. + "functions": [ { - "name": "str", # Optional. The name of this service. - "source_commit_hash": "str" # Optional. The commit - hash of the repository that was used to build this service. - } - ], - "spec": { - "name": "str", # The name of the app. Must be unique across - all apps in the same account. Required. - "databases": [ - { - "name": "str", # The database's name. The - name must be unique across all components within the same app and - cannot use capital letters. Required. - "cluster_name": "str", # Optional. The name - of the underlying DigitalOcean DBaaS cluster. This is required - for production databases. For dev databases, if cluster_name is - not set, a new cluster will be provisioned. - "db_name": "str", # Optional. The name of - the MySQL or PostgreSQL database to configure. - "db_user": "str", # Optional. The name of - the MySQL or PostgreSQL user to configure. - "engine": "UNSET", # Optional. Default value - is "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * - MONGODB: MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * - VALKEY: ValKey. Known values are: "UNSET", "MYSQL", "PG", - "REDIS", "MONGODB", "KAFKA", "OPENSEARCH", and "VALKEY". - "production": bool, # Optional. Whether this - is a production or dev database. - "version": "str" # Optional. The version of - the database engine. - } - ], - "disable_edge_cache": False, # Optional. Default value is - False. 
.. role:: raw-html-m2r(raw) :format: html If set to - ``true``"" , the app will **not** be cached at the edge (CDN). Enable - this option if you want to manage CDN configuration yourself"u2014whether - by using an external CDN provider or by handling static content and - caching within your app. This setting is also recommended for apps that - require real-time data or serve dynamic content, such as those using - Server-Sent Events (SSE) over GET, or hosting an MCP (Model Context - Protocol) Server that utilizes SSE."" :raw-html-m2r:`
` **Note:** This - feature is not available for static site components."" - :raw-html-m2r:`
` For more information, see `Disable CDN Cache - `_. - "disable_email_obfuscation": False, # Optional. Default - value is False. If set to ``true``"" , email addresses in the app will - not be obfuscated. This is useful for apps that require email addresses - to be visible (in the HTML markup). - "domains": [ - { - "domain": "str", # The hostname for the - domain. Required. - "minimum_tls_version": "str", # Optional. - The minimum version of TLS a client application can use to access - resources for the domain. Must be one of the following values - wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values - are: "1.2" and "1.3". - "type": "UNSPECIFIED", # Optional. Default - value is "UNSPECIFIED". * DEFAULT: The default - ``.ondigitalocean.app`` domain assigned to this app * PRIMARY: - The primary domain for this app that is displayed as the default - in the control panel, used in bindable environment variables, and - any other places that reference an app's live URL. Only one - domain may be set as primary. * ALIAS: A non-primary domain. - Known values are: "UNSPECIFIED", "DEFAULT", "PRIMARY", and - "ALIAS". - "wildcard": bool, # Optional. Indicates - whether the domain includes all sub-domains, in addition to the - given domain. - "zone": "str" # Optional. Optional. If the - domain uses DigitalOcean DNS and you would like App Platform to - automatically manage it for you, set this to the name of the - domain on your account. For example, If the domain you are - adding is ``app.domain.com``"" , the zone could be - ``domain.com``. - } - ], - "egress": { - "type": "AUTOASSIGN" # Optional. Default value is - "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and - "DEDICATED_IP". - }, - "enhanced_threat_control_enabled": False, # Optional. - Default value is False. If set to ``true``"" , suspicious requests will - go through additional security checks to help mitigate layer 7 DDoS - attacks. - "functions": [ - { - "name": "str", # The name. 
Must be unique - across all components within the same app. Required. - "alerts": [ + "name": "str", # The name. Must be unique across all + components within the same app. Required. + "alerts": [ + { + "disabled": bool, # Optional. Is the + alert disabled?. + "operator": "UNSPECIFIED_OPERATOR", + # Optional. Default value is "UNSPECIFIED_OPERATOR". Known + values are: "UNSPECIFIED_OPERATOR", "GREATER_THAN", and + "LESS_THAN". + "rule": "UNSPECIFIED_RULE", # + Optional. Default value is "UNSPECIFIED_RULE". Known values + are: "UNSPECIFIED_RULE", "CPU_UTILIZATION", + "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED", + "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", + "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", + "FUNCTIONS_ACTIVATION_COUNT", + "FUNCTIONS_AVERAGE_DURATION_MS", + "FUNCTIONS_ERROR_RATE_PER_MINUTE", + "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", + and "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, # Optional. Threshold + value for alert. + "window": "UNSPECIFIED_WINDOW" # + Optional. Default value is "UNSPECIFIED_WINDOW". Known values + are: "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", + "THIRTY_MINUTES", and "ONE_HOUR". + } + ], + "bitbucket": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "cors": { + "allow_credentials": bool, # Optional. + Whether browsers should expose the response to the client-side + JavaScript code when the request"u2019s credentials mode is + include. This configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # Optional. The set of allowed + HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # Optional. The set of allowed + HTTP methods. 
This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ { - "disabled": bool, # - Optional. Is the alert disabled?. - "operator": - "UNSPECIFIED_OPERATOR", # Optional. Default value is - "UNSPECIFIED_OPERATOR". Known values are: - "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN". - "rule": "UNSPECIFIED_RULE", - # Optional. Default value is "UNSPECIFIED_RULE". Known - values are: "UNSPECIFIED_RULE", "CPU_UTILIZATION", - "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED", - "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", - "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", - "FUNCTIONS_ACTIVATION_COUNT", - "FUNCTIONS_AVERAGE_DURATION_MS", - "FUNCTIONS_ERROR_RATE_PER_MINUTE", - "FUNCTIONS_AVERAGE_WAIT_TIME_MS", - "FUNCTIONS_ERROR_COUNT", and - "FUNCTIONS_GB_RATE_PER_SECOND". - "value": 0.0, # Optional. - Threshold value for alert. - "window": - "UNSPECIFIED_WINDOW" # Optional. Default value is - "UNSPECIFIED_WINDOW". Known values are: - "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", - "THIRTY_MINUTES", and "ONE_HOUR". + "exact": "str", # Optional. + Exact string match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "prefix": "str", # Optional. + Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "regex": "str" # Optional. + RE2 style regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For more + information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. } ], - "bitbucket": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. + "expose_headers": [ + "str" # Optional. The set of HTTP + response headers that browsers are allowed to access. 
This + configures the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. An optional + duration specifying how long browsers can cache the results of a + preflight request. This configures the ``Access-Control-Max-Age`` + header. + }, + "envs": [ + { + "key": "str", # The variable name. + Required. + "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. The + value. If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The name of the + branch to use. + "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. 
Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # Optional. + Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # + Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": "str", # + Optional. The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. + Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is + "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "routes": [ + { + "path": "str", # Optional. + (Deprecated - Use Ingress Rules instead). An HTTP path + prefix. Paths must start with / and must be unique across all + components within an app. + "preserve_path_prefix": bool # + Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded to + the component. For example, a component with ``path=/api`` + will have requests to ``/api/list`` trimmed to ``/list``. If + this value is ``true``"" , the path will remain + ``/api/list``. + } + ], + "source_dir": "str" # Optional. An optional path to + the working directory to use for the build. For Dockerfile builds, + this will be used as the build context. Must be relative to the root + of the repo. 
+ } + ], + "ingress": { + "rules": [ + { + "component": { + "name": "str", # The name of the + component to route to. Required. + "preserve_path_prefix": "str", # + Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded to + the component. For example, a component with ``path=/api`` + will have requests to ``/api/list`` trimmed to ``/list``. If + this value is ``true``"" , the path will remain + ``/api/list``. Note: this is not applicable for Functions + Components and is mutually exclusive with ``rewrite``. + "rewrite": "str" # Optional. An + optional field that will rewrite the path of the component to + be what is specified here. By default, the HTTP request path + will be trimmed from the left when forwarded to the + component. For example, a component with ``path=/api`` will + have requests to ``/api/list`` trimmed to ``/list``. If you + specified the rewrite to be ``/v1/``"" , requests to + ``/api/list`` would be rewritten to ``/v1/list``. Note: this + is mutually exclusive with ``preserve_path_prefix``. }, "cors": { "allow_credentials": bool, # @@ -109287,1237 +111975,1651 @@ def create_rollback( results of a preflight request. This configures the ``Access-Control-Max-Age`` header. }, - "envs": [ - { - "key": "str", # The variable - name. Required. - "scope": - "RUN_AND_BUILD_TIME", # Optional. Default value is - "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at - run-time * BUILD_TIME: Made available only at build-time - * RUN_AND_BUILD_TIME: Made available at both build and - run-time. Known values are: "UNSET", "RUN_TIME", - "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # - Optional. Default value is "GENERAL". * GENERAL: A - plain-text environment variable * SECRET: A secret - encrypted environment variable. Known values are: - "GENERAL" and "SECRET". - "value": "str" # Optional. - The value. 
If the type is ``SECRET``"" , the value will - be encrypted on first submission. On following - submissions, the encrypted value should be used. + "match": { + "authority": { + "exact": "str" # Required. + }, + "path": { + "prefix": "str" # + Prefix-based match. For example, ``/api`` will match + ``/api``"" , ``/api/``"" , and any nested paths such as + ``/api/v1/endpoint``. Required. } - ], - "git": { - "branch": "str", # Optional. The - name of the branch to use. - "repo_clone_url": "str" # Optional. - The clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. }, - "github": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. + "redirect": { + "authority": "str", # Optional. The + authority/host to redirect to. This can be a hostname or IP + address. Note: use ``port`` to set the port. + "port": 0, # Optional. The port to + redirect to. + "redirect_code": 0, # Optional. The + redirect code to use. Defaults to ``302``. Supported values + are 300, 301, 302, 303, 304, 307, 308. + "scheme": "str", # Optional. The + scheme to redirect to. Supported values are ``http`` or + ``https``. Default: ``https``. + "uri": "str" # Optional. An optional + URI path to redirect to. Note: if this is specified the whole + URI of the original request will be overwritten to this + value, irrespective of the original request URI being + matched. + } + } + ] + }, + "jobs": [ + { + "autoscaling": { + "max_instance_count": 0, # Optional. The + maximum amount of instances for this component. Must be more than + min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # Optional. + Default value is 80. The average target CPU utilization + for the component. + } }, - "gitlab": { - "branch": "str", # Optional. 
The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. + "min_instance_count": 0 # Optional. The + minimum amount of instances for this component. Must be less than + max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An optional + build command to run while building this component from source. + "dockerfile_path": "str", # Optional. The path to + the Dockerfile relative to the root of the repo. If set, it will be + used to build this component. Otherwise, App Platform will attempt to + build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full list, + please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable name. + Required. + "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. The + value. If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. 
+ } + ], + "git": { + "branch": "str", # Optional. The name of the + branch to use. + "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # Optional. Whether + to automatically deploy new images. Can only be used for + images hosted in DOCR and can only be used with an image tag, + not a specific digest. }, - "log_destinations": [ - { - "name": "str", # Required. - "datadog": { - "api_key": "str", # - Datadog API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # - Optional. Logtail token. - }, - "open_search": { - "basic_auth": { - "password": - "str", # Optional. Password for user defined in - User. Is required when ``endpoint`` is set. - Cannot be set if using a DigitalOcean DBaaS - OpenSearch cluster. - "user": "str" - # Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": - "str", # Optional. The name of a DigitalOcean DBaaS - OpenSearch cluster to use as a log forwarding - destination. Cannot be specified if ``endpoint`` is - also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. 
Format: - https://:code:``::code:``. Cannot be - specified if ``cluster_name`` is also specified. - "index_name": "logs" - # Optional. Default value is "logs". The index name - to use for the logs. If not set, the default index - name is "logs". - }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } - } - ], - "routes": [ - { - "path": "str", # Optional. - (Deprecated - Use Ingress Rules instead). An HTTP path - prefix. Paths must start with / and must be unique across - all components within an app. - "preserve_path_prefix": bool - # Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded - to the component. For example, a component with - ``path=/api`` will have requests to ``/api/list`` trimmed - to ``/list``. If this value is ``true``"" , the path will - remain ``/api/list``. - } - ], - "source_dir": "str" # Optional. An optional - path to the working directory to use for the build. For - Dockerfile builds, this will be used as the build context. Must - be relative to the root of the repo. - } - ], - "ingress": { - "rules": [ + "digest": "str", # Optional. The image + digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The registry + name. Must be left empty for the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. + The credentials to be able to pull the image. The value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * "$username:$access_token" + for registries of type ``GHCR``. + "registry_type": "str", # Optional. * + DOCKER_HUB: The DockerHub container registry type. * DOCR: The + DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", "DOCR", + and "GHCR". 
+ "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default value is + "latest". The repository tag. Defaults to ``latest`` if not + provided and no digest is provided. Cannot be specified if digest + is provided. + }, + "instance_count": 1, # Optional. Default value is 1. + The amount of instances that this component should be scaled to. + Default: 1. Must not be set if autoscaling is used. + "instance_size_slug": {}, + "kind": "UNSPECIFIED", # Optional. Default value is + "UNSPECIFIED". * UNSPECIFIED: Default job type, will auto-complete to + POST_DEPLOY kind. * PRE_DEPLOY: Indicates a job that runs before an + app deployment. * POST_DEPLOY: Indicates a job that runs after an app + deployment. * FAILED_DEPLOY: Indicates a job that runs after a + component fails to deploy. Known values are: "UNSPECIFIED", + "PRE_DEPLOY", "POST_DEPLOY", and "FAILED_DEPLOY". + "log_destinations": [ { - "component": { - "name": "str", # The name of - the component to route to. Required. - "preserve_path_prefix": - "str", # Optional. An optional flag to preserve the path - that is forwarded to the backend service. By default, the - HTTP request path will be trimmed from the left when - forwarded to the component. For example, a component with - ``path=/api`` will have requests to ``/api/list`` trimmed - to ``/list``. If this value is ``true``"" , the path will - remain ``/api/list``. Note: this is not applicable for - Functions Components and is mutually exclusive with - ``rewrite``. - "rewrite": "str" # Optional. - An optional field that will rewrite the path of the - component to be what is specified here. By default, the - HTTP request path will be trimmed from the left when - forwarded to the component. For example, a component with - ``path=/api`` will have requests to ``/api/list`` trimmed - to ``/list``. If you specified the rewrite to be - ``/v1/``"" , requests to ``/api/list`` would be rewritten - to ``/v1/list``. 
Note: this is mutually exclusive with - ``preserve_path_prefix``. + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. }, - "cors": { - "allow_credentials": bool, # - Optional. Whether browsers should expose the response to - the client-side JavaScript code when the request"u2019s - credentials mode is include. This configures the - ``Access-Control-Allow-Credentials`` header. - "allow_headers": [ - "str" # Optional. - The set of allowed HTTP request headers. This - configures the ``Access-Control-Allow-Headers`` - header. - ], - "allow_methods": [ - "str" # Optional. - The set of allowed HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. - ], - "allow_origins": [ - { - "exact": - "str", # Optional. Exact string match. Only 1 of - ``exact``"" , ``prefix``"" , or ``regex`` must be - set. - "prefix": - "str", # Optional. Prefix-based match. Only 1 of - ``exact``"" , ``prefix``"" , or ``regex`` must be - set. - "regex": - "str" # Optional. RE2 style regex-based match. - Only 1 of ``exact``"" , ``prefix``"" , or - ``regex`` must be set. For more information about - RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. - } - ], - "expose_headers": [ - "str" # Optional. - The set of HTTP response headers that browsers are - allowed to access. This configures the - ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. - An optional duration specifying how long browsers can - cache the results of a preflight request. This configures - the ``Access-Control-Max-Age`` header. + "logtail": { + "token": "str" # Optional. + Logtail token. }, - "match": { - "authority": { - "exact": "str" # - Required. + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. 
+ "user": "str" # + Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. }, - "path": { - "prefix": "str" # - Prefix-based match. For example, ``/api`` will match - ``/api``"" , ``/api/``"" , and any nested paths such - as ``/api/v1/endpoint``. Required. - } + "cluster_name": "str", # + Optional. The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. + Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is + "logs". }, - "redirect": { - "authority": "str", # - Optional. The authority/host to redirect to. This can be - a hostname or IP address. Note: use ``port`` to set the - port. - "port": 0, # Optional. The - port to redirect to. - "redirect_code": 0, # - Optional. The redirect code to use. Defaults to ``302``. - Supported values are 300, 301, 302, 303, 304, 307, 308. - "scheme": "str", # Optional. - The scheme to redirect to. Supported values are ``http`` - or ``https``. Default: ``https``. - "uri": "str" # Optional. An - optional URI path to redirect to. Note: if this is - specified the whole URI of the original request will be - overwritten to this value, irrespective of the original - request URI being matched. + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. } } - ] - }, - "jobs": [ - { - "autoscaling": { - "max_instance_count": 0, # Optional. - The maximum amount of instances for this component. Must be - more than min_instance_count. - "metrics": { - "cpu": { - "percent": 80 # - Optional. Default value is 80. The average target CPU - utilization for the component. 
- } - }, - "min_instance_count": 0 # Optional. - The minimum amount of instances for this component. Must be - less than max_instance_count. - }, - "bitbucket": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. + ], + "name": "str", # Optional. The name. Must be unique + across all components within the same app. + "run_command": "str", # Optional. An optional run + command to override the component's default. + "source_dir": "str", # Optional. An optional path to + the working directory to use for the build. For Dockerfile builds, + this will be used as the build context. Must be relative to the root + of the repo. + "termination": { + "grace_period_seconds": 0 # Optional. The + number of seconds to wait between sending a TERM signal to a + container and issuing a KILL which causes immediate shutdown. + (Default 120). + } + } + ], + "maintenance": { + "archive": bool, # Optional. Indicates whether the app + should be archived. Setting this to true implies that enabled is set to + true. + "enabled": bool, # Optional. Indicates whether maintenance + mode should be enabled for the app. + "offline_page_url": "str" # Optional. A custom offline page + to display when maintenance mode is enabled or the app is archived. + }, + "region": "str", # Optional. The slug form of the geographical + origin of the app. Default: ``nearest available``. Known values are: "atl", + "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", "sgp", and "syd". + "services": [ + { + "autoscaling": { + "max_instance_count": 0, # Optional. The + maximum amount of instances for this component. Must be more than + min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # Optional. + Default value is 80. The average target CPU utilization + for the component. 
+ } }, - "build_command": "str", # Optional. An - optional build command to run while building this component from - source. - "dockerfile_path": "str", # Optional. The - path to the Dockerfile relative to the root of the repo. If set, - it will be used to build this component. Otherwise, App Platform - will attempt to build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full - list, please refer to `the product documentation - `_. - "envs": [ + "min_instance_count": 0 # Optional. The + minimum amount of instances for this component. Must be less than + max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An optional + build command to run while building this component from source. + "cors": { + "allow_credentials": bool, # Optional. + Whether browsers should expose the response to the client-side + JavaScript code when the request"u2019s credentials mode is + include. This configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # Optional. The set of allowed + HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # Optional. The set of allowed + HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ { - "key": "str", # The variable - name. Required. - "scope": - "RUN_AND_BUILD_TIME", # Optional. Default value is - "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at - run-time * BUILD_TIME: Made available only at build-time - * RUN_AND_BUILD_TIME: Made available at both build and - run-time. 
Known values are: "UNSET", "RUN_TIME", - "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # - Optional. Default value is "GENERAL". * GENERAL: A - plain-text environment variable * SECRET: A secret - encrypted environment variable. Known values are: - "GENERAL" and "SECRET". - "value": "str" # Optional. - The value. If the type is ``SECRET``"" , the value will - be encrypted on first submission. On following - submissions, the encrypted value should be used. + "exact": "str", # Optional. + Exact string match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "prefix": "str", # Optional. + Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "regex": "str" # Optional. + RE2 style regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For more + information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. } ], - "git": { - "branch": "str", # Optional. The - name of the branch to use. - "repo_clone_url": "str" # Optional. - The clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. + "expose_headers": [ + "str" # Optional. The set of HTTP + response headers that browsers are allowed to access. This + configures the ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. 
An optional + duration specifying how long browsers can cache the results of a + preflight request. This configures the ``Access-Control-Max-Age`` + header. + }, + "dockerfile_path": "str", # Optional. The path to + the Dockerfile relative to the root of the repo. If set, it will be + used to build this component. Otherwise, App Platform will attempt to + build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full list, + please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable name. + Required. + "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. The + value. If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The name of the + branch to use. + "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. 
Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "health_check": { + "failure_threshold": 0, # Optional. The + number of failed health checks before considered unhealthy. + "http_path": "str", # Optional. The route + path used for the HTTP health check ping. If not set, the HTTP + health check will be disabled and a TCP health check used + instead. + "initial_delay_seconds": 0, # Optional. The + number of seconds to wait before beginning health checks. + "period_seconds": 0, # Optional. The number + of seconds to wait between health checks. + "port": 0, # Optional. The port on which the + health check will be performed. If not set, the health check will + be performed on the component's http_port. + "success_threshold": 0, # Optional. The + number of successful health checks before considered healthy. + "timeout_seconds": 0 # Optional. The number + of seconds after which the check times out. + }, + "http_port": 0, # Optional. The internal port on + which this service's run command will listen. Default: 8080 If there + is not an environment variable with the name ``PORT``"" , one will be + automatically added with its value set to the value of this field. + "image": { + "deploy_on_push": { + "enabled": bool # Optional. Whether + to automatically deploy new images. Can only be used for + images hosted in DOCR and can only be used with an image tag, + not a specific digest. }, - "image": { - "deploy_on_push": { - "enabled": bool # Optional. - Whether to automatically deploy new images. Can only be - used for images hosted in DOCR and can only be used with - an image tag, not a specific digest. + "digest": "str", # Optional. The image + digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The registry + name. Must be left empty for the ``DOCR`` registry type. 
+ "registry_credentials": "str", # Optional. + The credentials to be able to pull the image. The value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * "$username:$access_token" + for registries of type ``GHCR``. + "registry_type": "str", # Optional. * + DOCKER_HUB: The DockerHub container registry type. * DOCR: The + DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", "DOCR", + and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default value is + "latest". The repository tag. Defaults to ``latest`` if not + provided and no digest is provided. Cannot be specified if digest + is provided. + }, + "instance_count": 1, # Optional. Default value is 1. + The amount of instances that this component should be scaled to. + Default: 1. Must not be set if autoscaling is used. + "instance_size_slug": {}, + "internal_ports": [ + 0 # Optional. The ports on which this + service will listen for internal traffic. + ], + "liveness_health_check": { + "failure_threshold": 0, # Optional. The + number of failed health checks before considered unhealthy. + "http_path": "str", # Optional. The route + path used for the HTTP health check ping. If not set, the HTTP + health check will be disabled and a TCP health check used + instead. + "initial_delay_seconds": 0, # Optional. The + number of seconds to wait before beginning health checks. + "period_seconds": 0, # Optional. The number + of seconds to wait between health checks. + "port": 0, # Optional. The port on which the + health check will be performed. + "success_threshold": 0, # Optional. The + number of successful health checks before considered healthy. + "timeout_seconds": 0 # Optional. The number + of seconds after which the check times out. 
+ }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. }, - "digest": "str", # Optional. The - image digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The - registry name. Must be left empty for the ``DOCR`` registry - type. - "registry_credentials": "str", # - Optional. The credentials to be able to pull the image. The - value will be encrypted on first submission. On following - submissions, the encrypted value should be used. * - "$username:$access_token" for registries of type - ``DOCKER_HUB``. * "$username:$access_token" for registries of - type ``GHCR``. - "registry_type": "str", # Optional. - * DOCKER_HUB: The DockerHub container registry type. * DOCR: - The DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", - "DOCR", and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default - value is "latest". The repository tag. Defaults to ``latest`` - if not provided and no digest is provided. Cannot be - specified if digest is provided. - }, - "instance_count": 1, # Optional. Default - value is 1. The amount of instances that this component should be - scaled to. Default: 1. Must not be set if autoscaling is used. - "instance_size_slug": {}, - "kind": "UNSPECIFIED", # Optional. Default - value is "UNSPECIFIED". * UNSPECIFIED: Default job type, will - auto-complete to POST_DEPLOY kind. * PRE_DEPLOY: Indicates a job - that runs before an app deployment. * POST_DEPLOY: Indicates a - job that runs after an app deployment. * FAILED_DEPLOY: Indicates - a job that runs after a component fails to deploy. Known values - are: "UNSPECIFIED", "PRE_DEPLOY", "POST_DEPLOY", and - "FAILED_DEPLOY". - "log_destinations": [ - { - "name": "str", # Required. 
- "datadog": { - "api_key": "str", # - Datadog API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # - Optional. Logtail token. - }, - "open_search": { - "basic_auth": { - "password": - "str", # Optional. Password for user defined in - User. Is required when ``endpoint`` is set. - Cannot be set if using a DigitalOcean DBaaS - OpenSearch cluster. - "user": "str" - # Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": - "str", # Optional. The name of a DigitalOcean DBaaS - OpenSearch cluster to use as a log forwarding - destination. Cannot be specified if ``endpoint`` is - also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: - https://:code:``::code:``. Cannot be - specified if ``cluster_name`` is also specified. - "index_name": "logs" - # Optional. Default value is "logs". The index name - to use for the logs. If not set, the default index - name is "logs". + "logtail": { + "token": "str" # Optional. + Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # + Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } + "cluster_name": "str", # + Optional. The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. 
+ Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is + "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. } - ], - "name": "str", # Optional. The name. Must be - unique across all components within the same app. - "run_command": "str", # Optional. An - optional run command to override the component's default. - "source_dir": "str", # Optional. An optional - path to the working directory to use for the build. For - Dockerfile builds, this will be used as the build context. Must - be relative to the root of the repo. - "termination": { - "grace_period_seconds": 0 # - Optional. The number of seconds to wait between sending a - TERM signal to a container and issuing a KILL which causes - immediate shutdown. (Default 120). } + ], + "name": "str", # Optional. The name. Must be unique + across all components within the same app. + "protocol": "str", # Optional. The protocol which + the service uses to serve traffic on the http_port. * ``HTTP``"" : + The app is serving the HTTP protocol. Default. * ``HTTP2``"" : The + app is serving the HTTP/2 protocol. Currently, this needs to be + implemented in the service by serving HTTP/2 cleartext (h2c). Known + values are: "HTTP" and "HTTP2". + "routes": [ + { + "path": "str", # Optional. + (Deprecated - Use Ingress Rules instead). An HTTP path + prefix. Paths must start with / and must be unique across all + components within an app. + "preserve_path_prefix": bool # + Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded to + the component. For example, a component with ``path=/api`` + will have requests to ``/api/list`` trimmed to ``/list``. If + this value is ``true``"" , the path will remain + ``/api/list``. 
+ } + ], + "run_command": "str", # Optional. An optional run + command to override the component's default. + "source_dir": "str", # Optional. An optional path to + the working directory to use for the build. For Dockerfile builds, + this will be used as the build context. Must be relative to the root + of the repo. + "termination": { + "drain_seconds": 0, # Optional. The number + of seconds to wait between selecting a container instance for + termination and issuing the TERM signal. Selecting a container + instance for termination begins an asynchronous drain of new + requests on upstream load-balancers. (Default 15). + "grace_period_seconds": 0 # Optional. The + number of seconds to wait between sending a TERM signal to a + container and issuing a KILL which causes immediate shutdown. + (Default 120). } - ], - "maintenance": { - "archive": bool, # Optional. Indicates whether the - app should be archived. Setting this to true implies that enabled is - set to true. - "enabled": bool, # Optional. Indicates whether - maintenance mode should be enabled for the app. - "offline_page_url": "str" # Optional. A custom - offline page to display when maintenance mode is enabled or the app - is archived. - }, - "region": "str", # Optional. The slug form of the - geographical origin of the app. Default: ``nearest available``. Known - values are: "atl", "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", - "sgp", and "syd". - "services": [ - { - "autoscaling": { - "max_instance_count": 0, # Optional. - The maximum amount of instances for this component. Must be - more than min_instance_count. - "metrics": { - "cpu": { - "percent": 80 # - Optional. Default value is 80. The average target CPU - utilization for the component. - } - }, - "min_instance_count": 0 # Optional. - The minimum amount of instances for this component. Must be - less than max_instance_count. - }, - "bitbucket": { - "branch": "str", # Optional. The - name of the branch to use. 
- "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "build_command": "str", # Optional. An - optional build command to run while building this component from - source. - "cors": { - "allow_credentials": bool, # - Optional. Whether browsers should expose the response to the - client-side JavaScript code when the request"u2019s - credentials mode is include. This configures the - ``Access-Control-Allow-Credentials`` header. - "allow_headers": [ - "str" # Optional. The set of - allowed HTTP request headers. This configures the - ``Access-Control-Allow-Headers`` header. - ], - "allow_methods": [ - "str" # Optional. The set of - allowed HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. - ], - "allow_origins": [ - { - "exact": "str", # - Optional. Exact string match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. - "prefix": "str", # - Optional. Prefix-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. - "regex": "str" # - Optional. RE2 style regex-based match. Only 1 of - ``exact``"" , ``prefix``"" , or ``regex`` must be - set. For more information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. - } - ], - "expose_headers": [ - "str" # Optional. The set of - HTTP response headers that browsers are allowed to - access. This configures the - ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. An - optional duration specifying how long browsers can cache the - results of a preflight request. This configures the - ``Access-Control-Max-Age`` header. - }, - "dockerfile_path": "str", # Optional. The - path to the Dockerfile relative to the root of the repo. If set, - it will be used to build this component. Otherwise, App Platform - will attempt to build it using buildpacks. 
- "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full - list, please refer to `the product documentation - `_. - "envs": [ + } + ], + "static_sites": [ + { + "bitbucket": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An optional + build command to run while building this component from source. + "catchall_document": "str", # Optional. The name of + the document to use as the fallback for any requests to documents + that are not found when serving this static site. Only 1 of + ``catchall_document`` or ``error_document`` can be set. + "cors": { + "allow_credentials": bool, # Optional. + Whether browsers should expose the response to the client-side + JavaScript code when the request"u2019s credentials mode is + include. This configures the ``Access-Control-Allow-Credentials`` + header. + "allow_headers": [ + "str" # Optional. The set of allowed + HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # Optional. The set of allowed + HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ { - "key": "str", # The variable - name. Required. - "scope": - "RUN_AND_BUILD_TIME", # Optional. Default value is - "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at - run-time * BUILD_TIME: Made available only at build-time - * RUN_AND_BUILD_TIME: Made available at both build and - run-time. Known values are: "UNSET", "RUN_TIME", - "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # - Optional. Default value is "GENERAL". * GENERAL: A - plain-text environment variable * SECRET: A secret - encrypted environment variable. 
Known values are: - "GENERAL" and "SECRET". - "value": "str" # Optional. - The value. If the type is ``SECRET``"" , the value will - be encrypted on first submission. On following - submissions, the encrypted value should be used. + "exact": "str", # Optional. + Exact string match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "prefix": "str", # Optional. + Prefix-based match. Only 1 of ``exact``"" , ``prefix``"" + , or ``regex`` must be set. + "regex": "str" # Optional. + RE2 style regex-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. For more + information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. } ], - "git": { - "branch": "str", # Optional. The - name of the branch to use. - "repo_clone_url": "str" # Optional. - The clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "gitlab": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "health_check": { - "failure_threshold": 0, # Optional. - The number of failed health checks before considered - unhealthy. - "http_path": "str", # Optional. The - route path used for the HTTP health check ping. If not set, - the HTTP health check will be disabled and a TCP health check - used instead. - "initial_delay_seconds": 0, # - Optional. The number of seconds to wait before beginning - health checks. - "period_seconds": 0, # Optional. 
The - number of seconds to wait between health checks. - "port": 0, # Optional. The port on - which the health check will be performed. If not set, the - health check will be performed on the component's http_port. - "success_threshold": 0, # Optional. - The number of successful health checks before considered - healthy. - "timeout_seconds": 0 # Optional. The - number of seconds after which the check times out. - }, - "http_port": 0, # Optional. The internal - port on which this service's run command will listen. Default: - 8080 If there is not an environment variable with the name - ``PORT``"" , one will be automatically added with its value set - to the value of this field. - "image": { - "deploy_on_push": { - "enabled": bool # Optional. - Whether to automatically deploy new images. Can only be - used for images hosted in DOCR and can only be used with - an image tag, not a specific digest. - }, - "digest": "str", # Optional. The - image digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The - registry name. Must be left empty for the ``DOCR`` registry - type. - "registry_credentials": "str", # - Optional. The credentials to be able to pull the image. The - value will be encrypted on first submission. On following - submissions, the encrypted value should be used. * - "$username:$access_token" for registries of type - ``DOCKER_HUB``. * "$username:$access_token" for registries of - type ``GHCR``. - "registry_type": "str", # Optional. - * DOCKER_HUB: The DockerHub container registry type. * DOCR: - The DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", - "DOCR", and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default - value is "latest". The repository tag. Defaults to ``latest`` - if not provided and no digest is provided. Cannot be - specified if digest is provided. - }, - "instance_count": 1, # Optional. 
Default - value is 1. The amount of instances that this component should be - scaled to. Default: 1. Must not be set if autoscaling is used. - "instance_size_slug": {}, - "internal_ports": [ - 0 # Optional. The ports on which - this service will listen for internal traffic. - ], - "liveness_health_check": { - "failure_threshold": 0, # Optional. - The number of failed health checks before considered - unhealthy. - "http_path": "str", # Optional. The - route path used for the HTTP health check ping. If not set, - the HTTP health check will be disabled and a TCP health check - used instead. - "initial_delay_seconds": 0, # - Optional. The number of seconds to wait before beginning - health checks. - "period_seconds": 0, # Optional. The - number of seconds to wait between health checks. - "port": 0, # Optional. The port on - which the health check will be performed. - "success_threshold": 0, # Optional. - The number of successful health checks before considered - healthy. - "timeout_seconds": 0 # Optional. The - number of seconds after which the check times out. - }, - "log_destinations": [ - { - "name": "str", # Required. - "datadog": { - "api_key": "str", # - Datadog API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # - Optional. Logtail token. - }, - "open_search": { - "basic_auth": { - "password": - "str", # Optional. Password for user defined in - User. Is required when ``endpoint`` is set. - Cannot be set if using a DigitalOcean DBaaS - OpenSearch cluster. - "user": "str" - # Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": - "str", # Optional. The name of a DigitalOcean DBaaS - OpenSearch cluster to use as a log forwarding - destination. Cannot be specified if ``endpoint`` is - also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. 
Format: - https://:code:``::code:``. Cannot be - specified if ``cluster_name`` is also specified. - "index_name": "logs" - # Optional. Default value is "logs". The index name - to use for the logs. If not set, the default index - name is "logs". - }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } - } - ], - "name": "str", # Optional. The name. Must be - unique across all components within the same app. - "protocol": "str", # Optional. The protocol - which the service uses to serve traffic on the http_port. * - ``HTTP``"" : The app is serving the HTTP protocol. Default. * - ``HTTP2``"" : The app is serving the HTTP/2 protocol. Currently, - this needs to be implemented in the service by serving HTTP/2 - cleartext (h2c). Known values are: "HTTP" and "HTTP2". - "routes": [ - { - "path": "str", # Optional. - (Deprecated - Use Ingress Rules instead). An HTTP path - prefix. Paths must start with / and must be unique across - all components within an app. - "preserve_path_prefix": bool - # Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded - to the component. For example, a component with - ``path=/api`` will have requests to ``/api/list`` trimmed - to ``/list``. If this value is ``true``"" , the path will - remain ``/api/list``. - } + "expose_headers": [ + "str" # Optional. The set of HTTP + response headers that browsers are allowed to access. This + configures the ``Access-Control-Expose-Headers`` header. ], - "run_command": "str", # Optional. An - optional run command to override the component's default. - "source_dir": "str", # Optional. An optional - path to the working directory to use for the build. For - Dockerfile builds, this will be used as the build context. Must - be relative to the root of the repo. - "termination": { - "drain_seconds": 0, # Optional. 
The - number of seconds to wait between selecting a container - instance for termination and issuing the TERM signal. - Selecting a container instance for termination begins an - asynchronous drain of new requests on upstream - load-balancers. (Default 15). - "grace_period_seconds": 0 # - Optional. The number of seconds to wait between sending a - TERM signal to a container and issuing a KILL which causes - immediate shutdown. (Default 120). + "max_age": "str" # Optional. An optional + duration specifying how long browsers can cache the results of a + preflight request. This configures the ``Access-Control-Max-Age`` + header. + }, + "dockerfile_path": "str", # Optional. The path to + the Dockerfile relative to the root of the repo. If set, it will be + used to build this component. Otherwise, App Platform will attempt to + build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full list, + please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable name. + Required. + "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. The + value. If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. } - } - ], - "static_sites": [ - { - "bitbucket": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. 
- Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "build_command": "str", # Optional. An - optional build command to run while building this component from - source. - "catchall_document": "str", # Optional. The - name of the document to use as the fallback for any requests to - documents that are not found when serving this static site. Only - 1 of ``catchall_document`` or ``error_document`` can be set. - "cors": { - "allow_credentials": bool, # - Optional. Whether browsers should expose the response to the - client-side JavaScript code when the request"u2019s - credentials mode is include. This configures the - ``Access-Control-Allow-Credentials`` header. - "allow_headers": [ - "str" # Optional. The set of - allowed HTTP request headers. This configures the - ``Access-Control-Allow-Headers`` header. - ], - "allow_methods": [ - "str" # Optional. The set of - allowed HTTP methods. This configures the - ``Access-Control-Allow-Methods`` header. - ], - "allow_origins": [ - { - "exact": "str", # - Optional. Exact string match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. - "prefix": "str", # - Optional. Prefix-based match. Only 1 of ``exact``"" , - ``prefix``"" , or ``regex`` must be set. - "regex": "str" # - Optional. RE2 style regex-based match. Only 1 of - ``exact``"" , ``prefix``"" , or ``regex`` must be - set. For more information about RE2 syntax, see: - https://github.com/google/re2/wiki/Syntax. - } - ], - "expose_headers": [ - "str" # Optional. The set of - HTTP response headers that browsers are allowed to - access. This configures the - ``Access-Control-Expose-Headers`` header. - ], - "max_age": "str" # Optional. An - optional duration specifying how long browsers can cache the - results of a preflight request. This configures the - ``Access-Control-Max-Age`` header. 
- }, - "dockerfile_path": "str", # Optional. The - path to the Dockerfile relative to the root of the repo. If set, - it will be used to build this component. Otherwise, App Platform - will attempt to build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full - list, please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable - name. Required. - "scope": - "RUN_AND_BUILD_TIME", # Optional. Default value is - "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at - run-time * BUILD_TIME: Made available only at build-time - * RUN_AND_BUILD_TIME: Made available at both build and - run-time. Known values are: "UNSET", "RUN_TIME", - "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # - Optional. Default value is "GENERAL". * GENERAL: A - plain-text environment variable * SECRET: A secret - encrypted environment variable. Known values are: - "GENERAL" and "SECRET". - "value": "str" # Optional. - The value. If the type is ``SECRET``"" , the value will - be encrypted on first submission. On following - submissions, the encrypted value should be used. - } - ], - "error_document": "404.html", # Optional. - Default value is "404.html". The name of the error document to - use when serving this static site. Default: 404.html. If no such - file exists within the built assets, App Platform will supply - one. - "git": { - "branch": "str", # Optional. The - name of the branch to use. - "repo_clone_url": "str" # Optional. - The clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. 
- }, - "gitlab": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. + ], + "error_document": "404.html", # Optional. Default + value is "404.html". The name of the error document to use when + serving this static site. Default: 404.html. If no such file exists + within the built assets, App Platform will supply one. + "git": { + "branch": "str", # Optional. The name of the + branch to use. + "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # Optional. Whether + to automatically deploy new images. Can only be used for + images hosted in DOCR and can only be used with an image tag, + not a specific digest. }, - "image": { - "deploy_on_push": { - "enabled": bool # Optional. - Whether to automatically deploy new images. Can only be - used for images hosted in DOCR and can only be used with - an image tag, not a specific digest. + "digest": "str", # Optional. The image + digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The registry + name. 
Must be left empty for the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. + The credentials to be able to pull the image. The value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * "$username:$access_token" + for registries of type ``GHCR``. + "registry_type": "str", # Optional. * + DOCKER_HUB: The DockerHub container registry type. * DOCR: The + DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", "DOCR", + and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default value is + "latest". The repository tag. Defaults to ``latest`` if not + provided and no digest is provided. Cannot be specified if digest + is provided. + }, + "index_document": "index.html", # Optional. Default + value is "index.html". The name of the index document to use when + serving this static site. Default: index.html. + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. }, - "digest": "str", # Optional. The - image digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The - registry name. Must be left empty for the ``DOCR`` registry - type. - "registry_credentials": "str", # - Optional. The credentials to be able to pull the image. The - value will be encrypted on first submission. On following - submissions, the encrypted value should be used. * - "$username:$access_token" for registries of type - ``DOCKER_HUB``. * "$username:$access_token" for registries of - type ``GHCR``. - "registry_type": "str", # Optional. - * DOCKER_HUB: The DockerHub container registry type. * DOCR: - The DigitalOcean container registry type. * GHCR: The Github - container registry type. 
Known values are: "DOCKER_HUB", - "DOCR", and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default - value is "latest". The repository tag. Defaults to ``latest`` - if not provided and no digest is provided. Cannot be - specified if digest is provided. - }, - "index_document": "index.html", # Optional. - Default value is "index.html". The name of the index document to - use when serving this static site. Default: index.html. - "log_destinations": [ - { - "name": "str", # Required. - "datadog": { - "api_key": "str", # - Datadog API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # - Optional. Logtail token. - }, - "open_search": { - "basic_auth": { - "password": - "str", # Optional. Password for user defined in - User. Is required when ``endpoint`` is set. - Cannot be set if using a DigitalOcean DBaaS - OpenSearch cluster. - "user": "str" - # Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": - "str", # Optional. The name of a DigitalOcean DBaaS - OpenSearch cluster to use as a log forwarding - destination. Cannot be specified if ``endpoint`` is - also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: - https://:code:``::code:``. Cannot be - specified if ``cluster_name`` is also specified. - "index_name": "logs" - # Optional. Default value is "logs". The index name - to use for the logs. If not set, the default index - name is "logs". + "logtail": { + "token": "str" # Optional. + Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # + Optional. Username to authenticate with. 
Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. }, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } - } - ], - "name": "str", # Optional. The name. Must be - unique across all components within the same app. - "output_dir": "str", # Optional. An optional - path to where the built assets will be located, relative to the - build context. If not set, App Platform will automatically scan - for these directory names: ``_static``"" , ``dist``"" , - ``public``"" , ``build``. - "routes": [ - { - "path": "str", # Optional. - (Deprecated - Use Ingress Rules instead). An HTTP path - prefix. Paths must start with / and must be unique across - all components within an app. - "preserve_path_prefix": bool - # Optional. An optional flag to preserve the path that is - forwarded to the backend service. By default, the HTTP - request path will be trimmed from the left when forwarded - to the component. For example, a component with - ``path=/api`` will have requests to ``/api/list`` trimmed - to ``/list``. If this value is ``true``"" , the path will - remain ``/api/list``. + "cluster_name": "str", # + Optional. The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. + Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is + "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. } - ], - "run_command": "str", # Optional. An - optional run command to override the component's default. - "source_dir": "str" # Optional. An optional - path to the working directory to use for the build. 
For - Dockerfile builds, this will be used as the build context. Must - be relative to the root of the repo. - } - ], - "vpc": { - "egress_ips": [ + } + ], + "name": "str", # Optional. The name. Must be unique + across all components within the same app. + "output_dir": "str", # Optional. An optional path to + where the built assets will be located, relative to the build + context. If not set, App Platform will automatically scan for these + directory names: ``_static``"" , ``dist``"" , ``public``"" , + ``build``. + "routes": [ { - "ip": "str" # Optional. The egress - ips associated with the VPC. + "path": "str", # Optional. + (Deprecated - Use Ingress Rules instead). An HTTP path + prefix. Paths must start with / and must be unique across all + components within an app. + "preserve_path_prefix": bool # + Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded to + the component. For example, a component with ``path=/api`` + will have requests to ``/api/list`` trimmed to ``/list``. If + this value is ``true``"" , the path will remain + ``/api/list``. } ], - "id": "str" # Optional. The ID of the VPC. - }, - "workers": [ + "run_command": "str", # Optional. An optional run + command to override the component's default. + "source_dir": "str" # Optional. An optional path to + the working directory to use for the build. For Dockerfile builds, + this will be used as the build context. Must be relative to the root + of the repo. + } + ], + "vpc": { + "egress_ips": [ { - "autoscaling": { - "max_instance_count": 0, # Optional. - The maximum amount of instances for this component. Must be - more than min_instance_count. - "metrics": { - "cpu": { - "percent": 80 # - Optional. Default value is 80. The average target CPU - utilization for the component. - } - }, - "min_instance_count": 0 # Optional. - The minimum amount of instances for this component. 
Must be - less than max_instance_count. - }, - "bitbucket": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. - }, - "build_command": "str", # Optional. An - optional build command to run while building this component from - source. - "dockerfile_path": "str", # Optional. The - path to the Dockerfile relative to the root of the repo. If set, - it will be used to build this component. Otherwise, App Platform - will attempt to build it using buildpacks. - "environment_slug": "str", # Optional. An - environment slug describing the type of this app. For a full - list, please refer to `the product documentation - `_. - "envs": [ - { - "key": "str", # The variable - name. Required. - "scope": - "RUN_AND_BUILD_TIME", # Optional. Default value is - "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at - run-time * BUILD_TIME: Made available only at build-time - * RUN_AND_BUILD_TIME: Made available at both build and - run-time. Known values are: "UNSET", "RUN_TIME", - "BUILD_TIME", and "RUN_AND_BUILD_TIME". - "type": "GENERAL", # - Optional. Default value is "GENERAL". * GENERAL: A - plain-text environment variable * SECRET: A secret - encrypted environment variable. Known values are: - "GENERAL" and "SECRET". - "value": "str" # Optional. - The value. If the type is ``SECRET``"" , the value will - be encrypted on first submission. On following - submissions, the encrypted value should be used. + "ip": "str" # Optional. The egress ips + associated with the VPC. + } + ], + "id": "str" # Optional. The ID of the VPC. + }, + "workers": [ + { + "autoscaling": { + "max_instance_count": 0, # Optional. The + maximum amount of instances for this component. Must be more than + min_instance_count. 
+ "metrics": { + "cpu": { + "percent": 80 # Optional. + Default value is 80. The average target CPU utilization + for the component. } - ], - "git": { - "branch": "str", # Optional. The - name of the branch to use. - "repo_clone_url": "str" # Optional. - The clone URL of the repo. Example: - ``https://github.com/digitalocean/sample-golang.git``. - }, - "github": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. }, - "gitlab": { - "branch": "str", # Optional. The - name of the branch to use. - "deploy_on_push": bool, # Optional. - Whether to automatically deploy new commits made to the repo. - "repo": "str" # Optional. The name - of the repo in the format owner/repo. Example: - ``digitalocean/sample-golang``. + "min_instance_count": 0 # Optional. The + minimum amount of instances for this component. Must be less than + max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An optional + build command to run while building this component from source. + "dockerfile_path": "str", # Optional. The path to + the Dockerfile relative to the root of the repo. If set, it will be + used to build this component. Otherwise, App Platform will attempt to + build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full list, + please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable name. + Required. 
+ "scope": "RUN_AND_BUILD_TIME", # + Optional. Default value is "RUN_AND_BUILD_TIME". * RUN_TIME: + Made available only at run-time * BUILD_TIME: Made available + only at build-time * RUN_AND_BUILD_TIME: Made available at + both build and run-time. Known values are: "UNSET", + "RUN_TIME", "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # Optional. + Default value is "GENERAL". * GENERAL: A plain-text + environment variable * SECRET: A secret encrypted environment + variable. Known values are: "GENERAL" and "SECRET". + "value": "str" # Optional. The + value. If the type is ``SECRET``"" , the value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The name of the + branch to use. + "repo_clone_url": "str" # Optional. The + clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The name of the + branch to use. + "deploy_on_push": bool, # Optional. Whether + to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name of the + repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # Optional. Whether + to automatically deploy new images. Can only be used for + images hosted in DOCR and can only be used with an image tag, + not a specific digest. }, - "image": { - "deploy_on_push": { - "enabled": bool # Optional. - Whether to automatically deploy new images. Can only be - used for images hosted in DOCR and can only be used with - an image tag, not a specific digest. 
+ "digest": "str", # Optional. The image + digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The registry + name. Must be left empty for the ``DOCR`` registry type. + "registry_credentials": "str", # Optional. + The credentials to be able to pull the image. The value will be + encrypted on first submission. On following submissions, the + encrypted value should be used. * "$username:$access_token" for + registries of type ``DOCKER_HUB``. * "$username:$access_token" + for registries of type ``GHCR``. + "registry_type": "str", # Optional. * + DOCKER_HUB: The DockerHub container registry type. * DOCR: The + DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", "DOCR", + and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default value is + "latest". The repository tag. Defaults to ``latest`` if not + provided and no digest is provided. Cannot be specified if digest + is provided. + }, + "instance_count": 1, # Optional. Default value is 1. + The amount of instances that this component should be scaled to. + Default: 1. Must not be set if autoscaling is used. + "instance_size_slug": {}, + "liveness_health_check": { + "failure_threshold": 0, # Optional. The + number of failed health checks before considered unhealthy. + "http_path": "str", # Optional. The route + path used for the HTTP health check ping. If not set, the HTTP + health check will be disabled and a TCP health check used + instead. + "initial_delay_seconds": 0, # Optional. The + number of seconds to wait before beginning health checks. + "period_seconds": 0, # Optional. The number + of seconds to wait between health checks. + "port": 0, # Optional. The port on which the + health check will be performed. + "success_threshold": 0, # Optional. The + number of successful health checks before considered healthy. + "timeout_seconds": 0 # Optional. 
The number + of seconds after which the check times out. + }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # Datadog + API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. }, - "digest": "str", # Optional. The - image digest. Cannot be specified if tag is provided. - "registry": "str", # Optional. The - registry name. Must be left empty for the ``DOCR`` registry - type. - "registry_credentials": "str", # - Optional. The credentials to be able to pull the image. The - value will be encrypted on first submission. On following - submissions, the encrypted value should be used. * - "$username:$access_token" for registries of type - ``DOCKER_HUB``. * "$username:$access_token" for registries of - type ``GHCR``. - "registry_type": "str", # Optional. - * DOCKER_HUB: The DockerHub container registry type. * DOCR: - The DigitalOcean container registry type. * GHCR: The Github - container registry type. Known values are: "DOCKER_HUB", - "DOCR", and "GHCR". - "repository": "str", # Optional. The - repository name. - "tag": "latest" # Optional. Default - value is "latest". The repository tag. Defaults to ``latest`` - if not provided and no digest is provided. Cannot be - specified if digest is provided. - }, - "instance_count": 1, # Optional. Default - value is 1. The amount of instances that this component should be - scaled to. Default: 1. Must not be set if autoscaling is used. - "instance_size_slug": {}, - "liveness_health_check": { - "failure_threshold": 0, # Optional. - The number of failed health checks before considered - unhealthy. - "http_path": "str", # Optional. The - route path used for the HTTP health check ping. If not set, - the HTTP health check will be disabled and a TCP health check - used instead. - "initial_delay_seconds": 0, # - Optional. The number of seconds to wait before beginning - health checks. - "period_seconds": 0, # Optional. 
The - number of seconds to wait between health checks. - "port": 0, # Optional. The port on - which the health check will be performed. - "success_threshold": 0, # Optional. - The number of successful health checks before considered - healthy. - "timeout_seconds": 0 # Optional. The - number of seconds after which the check times out. - }, - "log_destinations": [ - { - "name": "str", # Required. - "datadog": { - "api_key": "str", # - Datadog API key. Required. - "endpoint": "str" # - Optional. Datadog HTTP log intake endpoint. - }, - "logtail": { - "token": "str" # - Optional. Logtail token. - }, - "open_search": { - "basic_auth": { - "password": - "str", # Optional. Password for user defined in - User. Is required when ``endpoint`` is set. - Cannot be set if using a DigitalOcean DBaaS - OpenSearch cluster. - "user": "str" - # Optional. Username to authenticate with. Only - required when ``endpoint`` is set. Defaults to - ``doadmin`` when ``cluster_name`` is set. - }, - "cluster_name": - "str", # Optional. The name of a DigitalOcean DBaaS - OpenSearch cluster to use as a log forwarding - destination. Cannot be specified if ``endpoint`` is - also specified. - "endpoint": "str", # - Optional. OpenSearch API Endpoint. Only HTTPS is - supported. Format: - https://:code:``::code:``. Cannot be - specified if ``cluster_name`` is also specified. - "index_name": "logs" - # Optional. Default value is "logs". The index name - to use for the logs. If not set, the default index - name is "logs". + "logtail": { + "token": "str" # Optional. + Logtail token. + }, + "open_search": { + "basic_auth": { + "password": "str", # + Optional. Password for user defined in User. Is + required when ``endpoint`` is set. Cannot be set if + using a DigitalOcean DBaaS OpenSearch cluster. + "user": "str" # + Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. 
}, - "papertrail": { - "endpoint": "str" # - Papertrail syslog endpoint. Required. - } + "cluster_name": "str", # + Optional. The name of a DigitalOcean DBaaS OpenSearch + cluster to use as a log forwarding destination. Cannot be + specified if ``endpoint`` is also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: https://:code:``::code:``. + Cannot be specified if ``cluster_name`` is also + specified. + "index_name": "logs" # + Optional. Default value is "logs". The index name to use + for the logs. If not set, the default index name is + "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. } - ], - "name": "str", # Optional. The name. Must be - unique across all components within the same app. - "run_command": "str", # Optional. An - optional run command to override the component's default. - "source_dir": "str", # Optional. An optional - path to the working directory to use for the build. For - Dockerfile builds, this will be used as the build context. Must - be relative to the root of the repo. - "termination": { - "grace_period_seconds": 0 # - Optional. The number of seconds to wait between sending a - TERM signal to a container and issuing a KILL which causes - immediate shutdown. (Default 120). } + ], + "name": "str", # Optional. The name. Must be unique + across all components within the same app. + "run_command": "str", # Optional. An optional run + command to override the component's default. + "source_dir": "str", # Optional. An optional path to + the working directory to use for the build. For Dockerfile builds, + this will be used as the build context. Must be relative to the root + of the repo. + "termination": { + "grace_period_seconds": 0 # Optional. The + number of seconds to wait between sending a TERM signal to a + container and issuing a KILL which causes immediate shutdown. + (Default 120). 
} - ] - }, - "static_sites": [ - { - "name": "str", # Optional. The name of this static - site. - "source_commit_hash": "str" # Optional. The commit - hash of the repository that was used to build this static site. - } - ], - "tier_slug": "str", # Optional. The current pricing tier slug of the - deployment. - "updated_at": "2020-02-20 00:00:00", # Optional. When the deployment - was last updated. - "workers": [ - { - "name": "str", # Optional. The name of this worker. - "source_commit_hash": "str" # Optional. The commit - hash of the repository that was used to build this worker. } ] } } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @distributed_trace - def create_rollback( - self, app_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Rollback App. + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} - Rollback an app to a previous deployment. A new deployment will be created to perform the - rollback. 
- The app will be pinned to the rollback deployment preventing any new deployments from being - created, - either manually or through Auto Deploy on Push webhooks. To resume deployments, the rollback - must be - either committed or reverted. + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) - It is recommended to use the Validate App Rollback endpoint to double check if the rollback is - valid and if there are any warnings. + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_apps_validate_app_spec_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: 
ignore + + @distributed_trace + def list_alerts(self, app_id: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """List all app alerts. + + List alerts associated to the app and any components. This includes configuration information + about the alerts including emails, slack webhooks, and triggering events or conditions. :param app_id: The app ID. Required. :type app_id: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "alerts": [ + { + "component_name": "str", # Optional. Name of component the + alert belongs to. + "emails": [ + "" # Optional. Default value is "". Emails for + alerts to go to. + ], + "id": "str", # Optional. The ID of the alert. + "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". + Known values are: "UNKNOWN", "PENDING", "CONFIGURING", "ACTIVE", and + "ERROR". + "progress": { + "steps": [ + { + "ended_at": "2020-02-20 00:00:00", # + Optional. The start time of this step. + "name": "str", # Optional. The name + of this step. + "reason": { + "code": "str", # Optional. + The error code. + "message": "str" # Optional. + The error message. + }, + "started_at": "2020-02-20 00:00:00", + # Optional. The start time of this step. + "status": "UNKNOWN" # Optional. + Default value is "UNKNOWN". Known values are: "UNKNOWN", + "PENDING", "RUNNING", "ERROR", and "SUCCESS". + } + ] + }, + "slack_webhooks": [ + { + "channel": "str", # Optional. Name of the + Slack Webhook Channel. + "url": "str" # Optional. URL of the Slack + webhook. + } + ], + "spec": { + "disabled": bool, # Optional. Is the alert + disabled?. + "operator": "UNSPECIFIED_OPERATOR", # Optional. + Default value is "UNSPECIFIED_OPERATOR". Known values are: + "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN". + "rule": "UNSPECIFIED_RULE", # Optional. Default + value is "UNSPECIFIED_RULE". 
Known values are: "UNSPECIFIED_RULE", + "CPU_UTILIZATION", "MEM_UTILIZATION", "RESTART_COUNT", + "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", "DOMAIN_FAILED", + "DOMAIN_LIVE", "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", + "FUNCTIONS_ACTIVATION_COUNT", "FUNCTIONS_AVERAGE_DURATION_MS", + "FUNCTIONS_ERROR_RATE_PER_MINUTE", "FUNCTIONS_AVERAGE_WAIT_TIME_MS", + "FUNCTIONS_ERROR_COUNT", and "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, # Optional. Threshold value for alert. + "window": "UNSPECIFIED_WINDOW" # Optional. Default + value is "UNSPECIFIED_WINDOW". Known values are: + "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", + "THIRTY_MINUTES", and "ONE_HOUR". + } + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_apps_list_alerts_request( + app_id=app_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + 
"int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def assign_alert_destinations( + self, + app_id: str, + alert_id: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Update destinations for alerts. + + Updates the emails and slack webhook destinations for app alerts. Emails must be associated to + a user with access to the app. + + :param app_id: The app ID. Required. + :type app_id: str + :param alert_id: The alert ID. Required. + :type alert_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "emails": [ + "" # Optional. Default value is "". + ], + "slack_webhooks": [ + { + "channel": "str", # Optional. Name of the Slack Webhook + Channel. + "url": "str" # Optional. URL of the Slack webhook. + } + ] + } + + # response body for status code(s): 200 + response == { + "alert": { + "component_name": "str", # Optional. Name of component the alert + belongs to. + "emails": [ + "" # Optional. Default value is "". Emails for alerts to go + to. + ], + "id": "str", # Optional. The ID of the alert. + "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". Known + values are: "UNKNOWN", "PENDING", "CONFIGURING", "ACTIVE", and "ERROR". + "progress": { + "steps": [ + { + "ended_at": "2020-02-20 00:00:00", # + Optional. The start time of this step. 
+ "name": "str", # Optional. The name of this + step. + "reason": { + "code": "str", # Optional. The error + code. + "message": "str" # Optional. The + error message. + }, + "started_at": "2020-02-20 00:00:00", # + Optional. The start time of this step. + "status": "UNKNOWN" # Optional. Default + value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING", + "RUNNING", "ERROR", and "SUCCESS". + } + ] + }, + "slack_webhooks": [ + { + "channel": "str", # Optional. Name of the Slack + Webhook Channel. + "url": "str" # Optional. URL of the Slack webhook. + } + ], + "spec": { + "disabled": bool, # Optional. Is the alert disabled?. + "operator": "UNSPECIFIED_OPERATOR", # Optional. Default + value is "UNSPECIFIED_OPERATOR". Known values are: + "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN". + "rule": "UNSPECIFIED_RULE", # Optional. Default value is + "UNSPECIFIED_RULE". Known values are: "UNSPECIFIED_RULE", + "CPU_UTILIZATION", "MEM_UTILIZATION", "RESTART_COUNT", + "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", + "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT", + "FUNCTIONS_AVERAGE_DURATION_MS", "FUNCTIONS_ERROR_RATE_PER_MINUTE", + "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", and + "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, # Optional. Threshold value for alert. + "window": "UNSPECIFIED_WINDOW" # Optional. Default value is + "UNSPECIFIED_WINDOW". Known values are: "UNSPECIFIED_WINDOW", + "FIVE_MINUTES", "TEN_MINUTES", "THIRTY_MINUTES", and "ONE_HOUR". + } + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def assign_alert_destinations( + self, + app_id: str, + alert_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Update destinations for alerts. + + Updates the emails and slack webhook destinations for app alerts. Emails must be associated to + a user with access to the app. + + :param app_id: The app ID. Required. + :type app_id: str + :param alert_id: The alert ID. Required. + :type alert_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "alert": { + "component_name": "str", # Optional. Name of component the alert + belongs to. + "emails": [ + "" # Optional. Default value is "". Emails for alerts to go + to. + ], + "id": "str", # Optional. The ID of the alert. + "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". Known + values are: "UNKNOWN", "PENDING", "CONFIGURING", "ACTIVE", and "ERROR". + "progress": { + "steps": [ + { + "ended_at": "2020-02-20 00:00:00", # + Optional. The start time of this step. + "name": "str", # Optional. The name of this + step. + "reason": { + "code": "str", # Optional. The error + code. + "message": "str" # Optional. The + error message. + }, + "started_at": "2020-02-20 00:00:00", # + Optional. The start time of this step. + "status": "UNKNOWN" # Optional. Default + value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING", + "RUNNING", "ERROR", and "SUCCESS". 
+ } + ] + }, + "slack_webhooks": [ + { + "channel": "str", # Optional. Name of the Slack + Webhook Channel. + "url": "str" # Optional. URL of the Slack webhook. + } + ], + "spec": { + "disabled": bool, # Optional. Is the alert disabled?. + "operator": "UNSPECIFIED_OPERATOR", # Optional. Default + value is "UNSPECIFIED_OPERATOR". Known values are: + "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN". + "rule": "UNSPECIFIED_RULE", # Optional. Default value is + "UNSPECIFIED_RULE". Known values are: "UNSPECIFIED_RULE", + "CPU_UTILIZATION", "MEM_UTILIZATION", "RESTART_COUNT", + "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", + "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT", + "FUNCTIONS_AVERAGE_DURATION_MS", "FUNCTIONS_ERROR_RATE_PER_MINUTE", + "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", and + "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, # Optional. Threshold value for alert. + "window": "UNSPECIFIED_WINDOW" # Optional. Default value is + "UNSPECIFIED_WINDOW". Known values are: "UNSPECIFIED_WINDOW", + "FIVE_MINUTES", "TEN_MINUTES", "THIRTY_MINUTES", and "ONE_HOUR". + } + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def assign_alert_destinations( + self, app_id: str, alert_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update destinations for alerts. 
+ + Updates the emails and slack webhook destinations for app alerts. Emails must be associated to + a user with access to the app. + + :param app_id: The app ID. Required. + :type app_id: str + :param alert_id: The alert ID. Required. + :type alert_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "emails": [ + "" # Optional. Default value is "". + ], + "slack_webhooks": [ + { + "channel": "str", # Optional. Name of the Slack Webhook + Channel. + "url": "str" # Optional. URL of the Slack webhook. + } + ] + } + + # response body for status code(s): 200 + response == { + "alert": { + "component_name": "str", # Optional. Name of component the alert + belongs to. + "emails": [ + "" # Optional. Default value is "". Emails for alerts to go + to. + ], + "id": "str", # Optional. The ID of the alert. + "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". Known + values are: "UNKNOWN", "PENDING", "CONFIGURING", "ACTIVE", and "ERROR". + "progress": { + "steps": [ + { + "ended_at": "2020-02-20 00:00:00", # + Optional. The start time of this step. + "name": "str", # Optional. The name of this + step. + "reason": { + "code": "str", # Optional. The error + code. + "message": "str" # Optional. The + error message. + }, + "started_at": "2020-02-20 00:00:00", # + Optional. The start time of this step. + "status": "UNKNOWN" # Optional. Default + value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING", + "RUNNING", "ERROR", and "SUCCESS". + } + ] + }, + "slack_webhooks": [ + { + "channel": "str", # Optional. Name of the Slack + Webhook Channel. + "url": "str" # Optional. URL of the Slack webhook. + } + ], + "spec": { + "disabled": bool, # Optional. Is the alert disabled?. + "operator": "UNSPECIFIED_OPERATOR", # Optional. 
Default + value is "UNSPECIFIED_OPERATOR". Known values are: + "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN". + "rule": "UNSPECIFIED_RULE", # Optional. Default value is + "UNSPECIFIED_RULE". Known values are: "UNSPECIFIED_RULE", + "CPU_UTILIZATION", "MEM_UTILIZATION", "RESTART_COUNT", + "DEPLOYMENT_FAILED", "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", + "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", "FUNCTIONS_ACTIVATION_COUNT", + "FUNCTIONS_AVERAGE_DURATION_MS", "FUNCTIONS_ERROR_RATE_PER_MINUTE", + "FUNCTIONS_AVERAGE_WAIT_TIME_MS", "FUNCTIONS_ERROR_COUNT", and + "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, # Optional. Threshold value for alert. + "window": "UNSPECIFIED_WINDOW" # Optional. Default value is + "UNSPECIFIED_WINDOW". Known values are: "UNSPECIFIED_WINDOW", + "FIVE_MINUTES", "TEN_MINUTES", "THIRTY_MINUTES", and "ONE_HOUR". + } + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_apps_assign_alert_destinations_request( + app_id=app_id, + alert_id=alert_id, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + 
deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def create_rollback( + self, + app_id: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Rollback App. + + Rollback an app to a previous deployment. A new deployment will be created to perform the + rollback. + The app will be pinned to the rollback deployment preventing any new deployments from being + created, + either manually or through Auto Deploy on Push webhooks. To resume deployments, the rollback + must be + either committed or reverted. + + It is recommended to use the Validate App Rollback endpoint to double check if the rollback is + valid and if there are any warnings. + + :param app_id: The app ID. Required. + :type app_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + Example: .. code-block:: python @@ -112007,217 +115109,9 @@ def create_rollback( tickets to help identify the issue. 
} """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[JSON] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body - - _request = build_apps_create_rollback_request( - app_id=app_id, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: 
- deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @overload - def validate_rollback( - self, - app_id: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Validate App Rollback. - - Check whether an app can be rolled back to a specific deployment. This endpoint can also be - used - to check if there are any warnings or validation conditions that will cause the rollback to - proceed - under unideal circumstances. For example, if a component must be rebuilt as part of the - rollback - causing it to take longer than usual. - - :param app_id: The app ID. Required. - :type app_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "deployment_id": "str", # Optional. The ID of the deployment to rollback to. - "skip_pin": bool # Optional. Whether to skip pinning the rollback - deployment. 
If false, the rollback deployment will be pinned and any new - deployments including Auto Deploy on Push hooks will be disabled until the - rollback is either manually committed or reverted via the CommitAppRollback or - RevertAppRollback endpoints respectively. If true, the rollback will be - immediately committed and the app will remain unpinned. - } - - # response body for status code(s): 200 - response == { - "error": { - "code": "str", # Optional. A code identifier that represents the - failing condition. Failing conditions: * ``incompatible_phase`` - - indicates that the deployment's phase is not suitable for rollback. * - ``incompatible_result`` - indicates that the deployment's result is not - suitable for rollback. * ``exceeded_revision_limit`` - indicates that the app - has exceeded the rollback revision limits for its tier. * ``app_pinned`` - - indicates that there is already a rollback in progress and the app is pinned. - * ``database_config_conflict`` - indicates that the deployment's database - config is different than the current config. * ``region_conflict`` - - indicates that the deployment's region differs from the current app region. - Warning conditions: * ``static_site_requires_rebuild`` - indicates that the - deployment contains at least one static site that will require a rebuild. * - ``image_source_missing_digest`` - indicates that the deployment contains at - least one component with an image source that is missing a digest. Known - values are: "incompatible_phase", "incompatible_result", - "exceeded_revision_limit", "app_pinned", "database_config_conflict", - "region_conflict", "static_site_requires_rebuild", and - "image_source_missing_digest". - "components": [ - "str" # Optional. - ], - "message": "str" # Optional. A human-readable message describing the - failing condition. - }, - "valid": bool, # Optional. Indicates whether the app can be rolled back to - the specified deployment. - "warnings": [ - { - "code": "str", # Optional. 
A code identifier that represents - the failing condition. Failing conditions: * ``incompatible_phase`` - - indicates that the deployment's phase is not suitable for rollback. * - ``incompatible_result`` - indicates that the deployment's result is not - suitable for rollback. * ``exceeded_revision_limit`` - indicates that the - app has exceeded the rollback revision limits for its tier. * - ``app_pinned`` - indicates that there is already a rollback in progress - and the app is pinned. * ``database_config_conflict`` - indicates that - the deployment's database config is different than the current config. * - ``region_conflict`` - indicates that the deployment's region differs from - the current app region. Warning conditions: * - ``static_site_requires_rebuild`` - indicates that the deployment contains - at least one static site that will require a rebuild. * - ``image_source_missing_digest`` - indicates that the deployment contains - at least one component with an image source that is missing a digest. - Known values are: "incompatible_phase", "incompatible_result", - "exceeded_revision_limit", "app_pinned", "database_config_conflict", - "region_conflict", "static_site_requires_rebuild", and - "image_source_missing_digest". - "components": [ - "str" # Optional. Contains a list of warnings that - may cause the rollback to run under unideal circumstances. - ], - "message": "str" # Optional. A human-readable message - describing the failing condition. - } - ] - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. 
Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ @overload - def validate_rollback( + def create_rollback( self, app_id: str, body: IO[bytes], @@ -112226,15 +115120,18 @@ def validate_rollback( **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Validate App Rollback. + """Rollback App. - Check whether an app can be rolled back to a specific deployment. This endpoint can also be - used - to check if there are any warnings or validation conditions that will cause the rollback to - proceed - under unideal circumstances. For example, if a component must be rebuilt as part of the - rollback - causing it to take longer than usual. + Rollback an app to a previous deployment. A new deployment will be created to perform the + rollback. + The app will be pinned to the rollback deployment preventing any new deployments from being + created, + either manually or through Auto Deploy on Push webhooks. To resume deployments, the rollback + must be + either committed or reverted. + + It is recommended to use the Validate App Rollback endpoint to double check if the rollback is + valid and if there are any warnings. :param app_id: The app ID. Required. :type app_id: str @@ -112247,399 +115144,6 @@ def validate_rollback( :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "error": { - "code": "str", # Optional. A code identifier that represents the - failing condition. Failing conditions: * ``incompatible_phase`` - - indicates that the deployment's phase is not suitable for rollback. * - ``incompatible_result`` - indicates that the deployment's result is not - suitable for rollback. * ``exceeded_revision_limit`` - indicates that the app - has exceeded the rollback revision limits for its tier. 
* ``app_pinned`` - - indicates that there is already a rollback in progress and the app is pinned. - * ``database_config_conflict`` - indicates that the deployment's database - config is different than the current config. * ``region_conflict`` - - indicates that the deployment's region differs from the current app region. - Warning conditions: * ``static_site_requires_rebuild`` - indicates that the - deployment contains at least one static site that will require a rebuild. * - ``image_source_missing_digest`` - indicates that the deployment contains at - least one component with an image source that is missing a digest. Known - values are: "incompatible_phase", "incompatible_result", - "exceeded_revision_limit", "app_pinned", "database_config_conflict", - "region_conflict", "static_site_requires_rebuild", and - "image_source_missing_digest". - "components": [ - "str" # Optional. - ], - "message": "str" # Optional. A human-readable message describing the - failing condition. - }, - "valid": bool, # Optional. Indicates whether the app can be rolled back to - the specified deployment. - "warnings": [ - { - "code": "str", # Optional. A code identifier that represents - the failing condition. Failing conditions: * ``incompatible_phase`` - - indicates that the deployment's phase is not suitable for rollback. * - ``incompatible_result`` - indicates that the deployment's result is not - suitable for rollback. * ``exceeded_revision_limit`` - indicates that the - app has exceeded the rollback revision limits for its tier. * - ``app_pinned`` - indicates that there is already a rollback in progress - and the app is pinned. * ``database_config_conflict`` - indicates that - the deployment's database config is different than the current config. * - ``region_conflict`` - indicates that the deployment's region differs from - the current app region. 
Warning conditions: * - ``static_site_requires_rebuild`` - indicates that the deployment contains - at least one static site that will require a rebuild. * - ``image_source_missing_digest`` - indicates that the deployment contains - at least one component with an image source that is missing a digest. - Known values are: "incompatible_phase", "incompatible_result", - "exceeded_revision_limit", "app_pinned", "database_config_conflict", - "region_conflict", "static_site_requires_rebuild", and - "image_source_missing_digest". - "components": [ - "str" # Optional. Contains a list of warnings that - may cause the rollback to run under unideal circumstances. - ], - "message": "str" # Optional. A human-readable message - describing the failing condition. - } - ] - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace - def validate_rollback( - self, app_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Validate App Rollback. - - Check whether an app can be rolled back to a specific deployment. This endpoint can also be - used - to check if there are any warnings or validation conditions that will cause the rollback to - proceed - under unideal circumstances. For example, if a component must be rebuilt as part of the - rollback - causing it to take longer than usual. - - :param app_id: The app ID. Required. 
- :type app_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "deployment_id": "str", # Optional. The ID of the deployment to rollback to. - "skip_pin": bool # Optional. Whether to skip pinning the rollback - deployment. If false, the rollback deployment will be pinned and any new - deployments including Auto Deploy on Push hooks will be disabled until the - rollback is either manually committed or reverted via the CommitAppRollback or - RevertAppRollback endpoints respectively. If true, the rollback will be - immediately committed and the app will remain unpinned. - } - - # response body for status code(s): 200 - response == { - "error": { - "code": "str", # Optional. A code identifier that represents the - failing condition. Failing conditions: * ``incompatible_phase`` - - indicates that the deployment's phase is not suitable for rollback. * - ``incompatible_result`` - indicates that the deployment's result is not - suitable for rollback. * ``exceeded_revision_limit`` - indicates that the app - has exceeded the rollback revision limits for its tier. * ``app_pinned`` - - indicates that there is already a rollback in progress and the app is pinned. - * ``database_config_conflict`` - indicates that the deployment's database - config is different than the current config. * ``region_conflict`` - - indicates that the deployment's region differs from the current app region. - Warning conditions: * ``static_site_requires_rebuild`` - indicates that the - deployment contains at least one static site that will require a rebuild. * - ``image_source_missing_digest`` - indicates that the deployment contains at - least one component with an image source that is missing a digest. 
Known - values are: "incompatible_phase", "incompatible_result", - "exceeded_revision_limit", "app_pinned", "database_config_conflict", - "region_conflict", "static_site_requires_rebuild", and - "image_source_missing_digest". - "components": [ - "str" # Optional. - ], - "message": "str" # Optional. A human-readable message describing the - failing condition. - }, - "valid": bool, # Optional. Indicates whether the app can be rolled back to - the specified deployment. - "warnings": [ - { - "code": "str", # Optional. A code identifier that represents - the failing condition. Failing conditions: * ``incompatible_phase`` - - indicates that the deployment's phase is not suitable for rollback. * - ``incompatible_result`` - indicates that the deployment's result is not - suitable for rollback. * ``exceeded_revision_limit`` - indicates that the - app has exceeded the rollback revision limits for its tier. * - ``app_pinned`` - indicates that there is already a rollback in progress - and the app is pinned. * ``database_config_conflict`` - indicates that - the deployment's database config is different than the current config. * - ``region_conflict`` - indicates that the deployment's region differs from - the current app region. Warning conditions: * - ``static_site_requires_rebuild`` - indicates that the deployment contains - at least one static site that will require a rebuild. * - ``image_source_missing_digest`` - indicates that the deployment contains - at least one component with an image source that is missing a digest. - Known values are: "incompatible_phase", "incompatible_result", - "exceeded_revision_limit", "app_pinned", "database_config_conflict", - "region_conflict", "static_site_requires_rebuild", and - "image_source_missing_digest". - "components": [ - "str" # Optional. Contains a list of warnings that - may cause the rollback to run under unideal circumstances. - ], - "message": "str" # Optional. A human-readable message - describing the failing condition. 
- } - ] - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[JSON] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body - - _request = build_apps_validate_rollback_request( - app_id=app_id, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - 
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @distributed_trace - def commit_rollback(self, app_id: str, **kwargs: Any) -> Optional[JSON]: - # pylint: disable=line-too-long - """Commit App Rollback. - - Commit an app rollback. This action permanently applies the rollback and unpins the app to - resume new deployments. - - :param app_id: The app ID. Required. - :type app_id: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. 
- "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - - _request = build_apps_commit_rollback_request( - app_id=app_id, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - deserialized = None - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: - 
response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def revert_rollback(self, app_id: str, **kwargs: Any) -> JSON: - # pylint: disable=line-too-long - """Revert App Rollback. - - Revert an app rollback. This action reverts the active rollback by creating a new deployment - from the - latest app spec prior to the rollback and unpins the app to resume new deployments. - - :param app_id: The app ID. Required. - :type app_id: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - Example: .. code-block:: python @@ -114093,17 +116597,5858 @@ def revert_rollback(self, app_id: str, **kwargs: Any) -> JSON: hash of the repository that was used to build this static site. } ], - "tier_slug": "str", # Optional. The current pricing tier slug of the - deployment. - "updated_at": "2020-02-20 00:00:00", # Optional. When the deployment - was last updated. - "workers": [ - { - "name": "str", # Optional. The name of this worker. - "source_commit_hash": "str" # Optional. The commit - hash of the repository that was used to build this worker. - } - ] + "tier_slug": "str", # Optional. The current pricing tier slug of the + deployment. + "updated_at": "2020-02-20 00:00:00", # Optional. When the deployment + was last updated. + "workers": [ + { + "name": "str", # Optional. The name of this worker. + "source_commit_hash": "str" # Optional. The commit + hash of the repository that was used to build this worker. 
+ } + ] + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def create_rollback( + self, app_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Rollback App. + + Rollback an app to a previous deployment. A new deployment will be created to perform the + rollback. + The app will be pinned to the rollback deployment preventing any new deployments from being + created, + either manually or through Auto Deploy on Push webhooks. To resume deployments, the rollback + must be + either committed or reverted. + + It is recommended to use the Validate App Rollback endpoint to double check if the rollback is + valid and if there are any warnings. + + :param app_id: The app ID. Required. + :type app_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "deployment_id": "str", # Optional. The ID of the deployment to rollback to. + "skip_pin": bool # Optional. Whether to skip pinning the rollback + deployment. 
If false, the rollback deployment will be pinned and any new + deployments including Auto Deploy on Push hooks will be disabled until the + rollback is either manually committed or reverted via the CommitAppRollback or + RevertAppRollback endpoints respectively. If true, the rollback will be + immediately committed and the app will remain unpinned. + } + + # response body for status code(s): 200 + response == { + "deployment": { + "cause": "str", # Optional. What caused this deployment to be + created. + "cloned_from": "str", # Optional. The ID of a previous deployment + that this deployment was cloned from. + "created_at": "2020-02-20 00:00:00", # Optional. The creation time + of the deployment. + "functions": [ + { + "name": "str", # Optional. The name of this + functions component. + "namespace": "str", # Optional. The namespace where + the functions are deployed. + "source_commit_hash": "str" # Optional. The commit + hash of the repository that was used to build this functions + component. + } + ], + "id": "str", # Optional. The ID of the deployment. + "jobs": [ + { + "name": "str", # Optional. The name of this job. + "source_commit_hash": "str" # Optional. The commit + hash of the repository that was used to build this job. + } + ], + "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". Known + values are: "UNKNOWN", "PENDING_BUILD", "BUILDING", "PENDING_DEPLOY", + "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and "CANCELED". + "phase_last_updated_at": "2020-02-20 00:00:00", # Optional. When the + deployment phase was last updated. + "progress": { + "error_steps": 0, # Optional. Number of unsuccessful steps. + "pending_steps": 0, # Optional. Number of pending steps. + "running_steps": 0, # Optional. Number of currently running + steps. + "steps": [ + { + "component_name": "str", # Optional. The + component name that this step is associated with. + "ended_at": "2020-02-20 00:00:00", # + Optional. The end time of this step. 
+ "message_base": "str", # Optional. The base + of a human-readable description of the step intended to be + combined with the component name for presentation. For example: + ``message_base`` = "Building service" ``component_name`` = "api". + "name": "str", # Optional. The name of this + step. + "reason": { + "code": "str", # Optional. The error + code. + "message": "str" # Optional. The + error message. + }, + "started_at": "2020-02-20 00:00:00", # + Optional. The start time of this step. + "status": "UNKNOWN", # Optional. Default + value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING", + "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. Child steps of this + step. + ] + } + ], + "success_steps": 0, # Optional. Number of successful steps. + "summary_steps": [ + { + "component_name": "str", # Optional. The + component name that this step is associated with. + "ended_at": "2020-02-20 00:00:00", # + Optional. The end time of this step. + "message_base": "str", # Optional. The base + of a human-readable description of the step intended to be + combined with the component name for presentation. For example: + ``message_base`` = "Building service" ``component_name`` = "api". + "name": "str", # Optional. The name of this + step. + "reason": { + "code": "str", # Optional. The error + code. + "message": "str" # Optional. The + error message. + }, + "started_at": "2020-02-20 00:00:00", # + Optional. The start time of this step. + "status": "UNKNOWN", # Optional. Default + value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING", + "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. Child steps of this + step. + ] + } + ], + "total_steps": 0 # Optional. Total number of steps. + }, + "services": [ + { + "name": "str", # Optional. The name of this service. + "source_commit_hash": "str" # Optional. The commit + hash of the repository that was used to build this service. + } + ], + "spec": { + "name": "str", # The name of the app. 
Must be unique across + all apps in the same account. Required. + "databases": [ + { + "name": "str", # The database's name. The + name must be unique across all components within the same app and + cannot use capital letters. Required. + "cluster_name": "str", # Optional. The name + of the underlying DigitalOcean DBaaS cluster. This is required + for production databases. For dev databases, if cluster_name is + not set, a new cluster will be provisioned. + "db_name": "str", # Optional. The name of + the MySQL or PostgreSQL database to configure. + "db_user": "str", # Optional. The name of + the MySQL or PostgreSQL user to configure. + "engine": "UNSET", # Optional. Default value + is "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * + MONGODB: MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * + VALKEY: ValKey. Known values are: "UNSET", "MYSQL", "PG", + "REDIS", "MONGODB", "KAFKA", "OPENSEARCH", and "VALKEY". + "production": bool, # Optional. Whether this + is a production or dev database. + "version": "str" # Optional. The version of + the database engine. + } + ], + "disable_edge_cache": False, # Optional. Default value is + False. .. role:: raw-html-m2r(raw) :format: html If set to + ``true``"" , the app will **not** be cached at the edge (CDN). Enable + this option if you want to manage CDN configuration yourself"u2014whether + by using an external CDN provider or by handling static content and + caching within your app. This setting is also recommended for apps that + require real-time data or serve dynamic content, such as those using + Server-Sent Events (SSE) over GET, or hosting an MCP (Model Context + Protocol) Server that utilizes SSE."" :raw-html-m2r:`
` **Note:** This + feature is not available for static site components."" + :raw-html-m2r:`
` For more information, see `Disable CDN Cache + `_. + "disable_email_obfuscation": False, # Optional. Default + value is False. If set to ``true``"" , email addresses in the app will + not be obfuscated. This is useful for apps that require email addresses + to be visible (in the HTML markup). + "domains": [ + { + "domain": "str", # The hostname for the + domain. Required. + "minimum_tls_version": "str", # Optional. + The minimum version of TLS a client application can use to access + resources for the domain. Must be one of the following values + wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values + are: "1.2" and "1.3". + "type": "UNSPECIFIED", # Optional. Default + value is "UNSPECIFIED". * DEFAULT: The default + ``.ondigitalocean.app`` domain assigned to this app * PRIMARY: + The primary domain for this app that is displayed as the default + in the control panel, used in bindable environment variables, and + any other places that reference an app's live URL. Only one + domain may be set as primary. * ALIAS: A non-primary domain. + Known values are: "UNSPECIFIED", "DEFAULT", "PRIMARY", and + "ALIAS". + "wildcard": bool, # Optional. Indicates + whether the domain includes all sub-domains, in addition to the + given domain. + "zone": "str" # Optional. Optional. If the + domain uses DigitalOcean DNS and you would like App Platform to + automatically manage it for you, set this to the name of the + domain on your account. For example, If the domain you are + adding is ``app.domain.com``"" , the zone could be + ``domain.com``. + } + ], + "egress": { + "type": "AUTOASSIGN" # Optional. Default value is + "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and + "DEDICATED_IP". + }, + "enhanced_threat_control_enabled": False, # Optional. + Default value is False. If set to ``true``"" , suspicious requests will + go through additional security checks to help mitigate layer 7 DDoS + attacks. + "functions": [ + { + "name": "str", # The name. 
Must be unique + across all components within the same app. Required. + "alerts": [ + { + "disabled": bool, # + Optional. Is the alert disabled?. + "operator": + "UNSPECIFIED_OPERATOR", # Optional. Default value is + "UNSPECIFIED_OPERATOR". Known values are: + "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN". + "rule": "UNSPECIFIED_RULE", + # Optional. Default value is "UNSPECIFIED_RULE". Known + values are: "UNSPECIFIED_RULE", "CPU_UTILIZATION", + "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED", + "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", + "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", + "FUNCTIONS_ACTIVATION_COUNT", + "FUNCTIONS_AVERAGE_DURATION_MS", + "FUNCTIONS_ERROR_RATE_PER_MINUTE", + "FUNCTIONS_AVERAGE_WAIT_TIME_MS", + "FUNCTIONS_ERROR_COUNT", and + "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, # Optional. + Threshold value for alert. + "window": + "UNSPECIFIED_WINDOW" # Optional. Default value is + "UNSPECIFIED_WINDOW". Known values are: + "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", + "THIRTY_MINUTES", and "ONE_HOUR". + } + ], + "bitbucket": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to the + client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. The set of + allowed HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # Optional. The set of + allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. 
+ ], + "allow_origins": [ + { + "exact": "str", # + Optional. Exact string match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "prefix": "str", # + Optional. Prefix-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "regex": "str" # + Optional. RE2 style regex-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. For more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. The set of + HTTP response headers that browsers are allowed to + access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. An + optional duration specifying how long browsers can cache the + results of a preflight request. This configures the + ``Access-Control-Max-Age`` header. + }, + "envs": [ + { + "key": "str", # The variable + name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at + run-time * BUILD_TIME: Made available only at build-time + * RUN_AND_BUILD_TIME: Made available at both build and + run-time. Known values are: "UNSET", "RUN_TIME", + "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # Optional. + The value. If the type is ``SECRET``"" , the value will + be encrypted on first submission. On following + submissions, the encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The + name of the branch to use. + "repo_clone_url": "str" # Optional. + The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The + name of the branch to use. 
+ "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # + Datadog API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # + Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": + "str", # Optional. Password for user defined in + User. Is required when ``endpoint`` is set. + Cannot be set if using a DigitalOcean DBaaS + OpenSearch cluster. + "user": "str" + # Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": + "str", # Optional. The name of a DigitalOcean DBaaS + OpenSearch cluster to use as a log forwarding + destination. Cannot be specified if ``endpoint`` is + also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": "logs" + # Optional. Default value is "logs". The index name + to use for the logs. If not set, the default index + name is "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "routes": [ + { + "path": "str", # Optional. + (Deprecated - Use Ingress Rules instead). An HTTP path + prefix. Paths must start with / and must be unique across + all components within an app. 
+ "preserve_path_prefix": bool + # Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded + to the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` trimmed + to ``/list``. If this value is ``true``"" , the path will + remain ``/api/list``. + } + ], + "source_dir": "str" # Optional. An optional + path to the working directory to use for the build. For + Dockerfile builds, this will be used as the build context. Must + be relative to the root of the repo. + } + ], + "ingress": { + "rules": [ + { + "component": { + "name": "str", # The name of + the component to route to. Required. + "preserve_path_prefix": + "str", # Optional. An optional flag to preserve the path + that is forwarded to the backend service. By default, the + HTTP request path will be trimmed from the left when + forwarded to the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` trimmed + to ``/list``. If this value is ``true``"" , the path will + remain ``/api/list``. Note: this is not applicable for + Functions Components and is mutually exclusive with + ``rewrite``. + "rewrite": "str" # Optional. + An optional field that will rewrite the path of the + component to be what is specified here. By default, the + HTTP request path will be trimmed from the left when + forwarded to the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` trimmed + to ``/list``. If you specified the rewrite to be + ``/v1/``"" , requests to ``/api/list`` would be rewritten + to ``/v1/list``. Note: this is mutually exclusive with + ``preserve_path_prefix``. + }, + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to + the client-side JavaScript code when the request"u2019s + credentials mode is include. 
This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. + The set of allowed HTTP request headers. This + configures the ``Access-Control-Allow-Headers`` + header. + ], + "allow_methods": [ + "str" # Optional. + The set of allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": + "str", # Optional. Exact string match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "prefix": + "str", # Optional. Prefix-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "regex": + "str" # Optional. RE2 style regex-based match. + Only 1 of ``exact``"" , ``prefix``"" , or + ``regex`` must be set. For more information about + RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. + The set of HTTP response headers that browsers are + allowed to access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. + An optional duration specifying how long browsers can + cache the results of a preflight request. This configures + the ``Access-Control-Max-Age`` header. + }, + "match": { + "authority": { + "exact": "str" # + Required. + }, + "path": { + "prefix": "str" # + Prefix-based match. For example, ``/api`` will match + ``/api``"" , ``/api/``"" , and any nested paths such + as ``/api/v1/endpoint``. Required. + } + }, + "redirect": { + "authority": "str", # + Optional. The authority/host to redirect to. This can be + a hostname or IP address. Note: use ``port`` to set the + port. + "port": 0, # Optional. The + port to redirect to. + "redirect_code": 0, # + Optional. The redirect code to use. Defaults to ``302``. + Supported values are 300, 301, 302, 303, 304, 307, 308. + "scheme": "str", # Optional. + The scheme to redirect to. Supported values are ``http`` + or ``https``. Default: ``https``. 
+ "uri": "str" # Optional. An + optional URI path to redirect to. Note: if this is + specified the whole URI of the original request will be + overwritten to this value, irrespective of the original + request URI being matched. + } + } + ] + }, + "jobs": [ + { + "autoscaling": { + "max_instance_count": 0, # Optional. + The maximum amount of instances for this component. Must be + more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # + Optional. Default value is 80. The average target CPU + utilization for the component. + } + }, + "min_instance_count": 0 # Optional. + The minimum amount of instances for this component. Must be + less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An + optional build command to run while building this component from + source. + "dockerfile_path": "str", # Optional. The + path to the Dockerfile relative to the root of the repo. If set, + it will be used to build this component. Otherwise, App Platform + will attempt to build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full + list, please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable + name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at + run-time * BUILD_TIME: Made available only at build-time + * RUN_AND_BUILD_TIME: Made available at both build and + run-time. Known values are: "UNSET", "RUN_TIME", + "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". 
* GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # Optional. + The value. If the type is ``SECRET``"" , the value will + be encrypted on first submission. On following + submissions, the encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The + name of the branch to use. + "repo_clone_url": "str" # Optional. + The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # Optional. + Whether to automatically deploy new images. Can only be + used for images hosted in DOCR and can only be used with + an image tag, not a specific digest. + }, + "digest": "str", # Optional. The + image digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The + registry name. Must be left empty for the ``DOCR`` registry + type. + "registry_credentials": "str", # + Optional. The credentials to be able to pull the image. The + value will be encrypted on first submission. On following + submissions, the encrypted value should be used. * + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for registries of + type ``GHCR``. + "registry_type": "str", # Optional. 
+ * DOCKER_HUB: The DockerHub container registry type. * DOCR: + The DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default + value is "latest". The repository tag. Defaults to ``latest`` + if not provided and no digest is provided. Cannot be + specified if digest is provided. + }, + "instance_count": 1, # Optional. Default + value is 1. The amount of instances that this component should be + scaled to. Default: 1. Must not be set if autoscaling is used. + "instance_size_slug": {}, + "kind": "UNSPECIFIED", # Optional. Default + value is "UNSPECIFIED". * UNSPECIFIED: Default job type, will + auto-complete to POST_DEPLOY kind. * PRE_DEPLOY: Indicates a job + that runs before an app deployment. * POST_DEPLOY: Indicates a + job that runs after an app deployment. * FAILED_DEPLOY: Indicates + a job that runs after a component fails to deploy. Known values + are: "UNSPECIFIED", "PRE_DEPLOY", "POST_DEPLOY", and + "FAILED_DEPLOY". + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # + Datadog API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # + Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": + "str", # Optional. Password for user defined in + User. Is required when ``endpoint`` is set. + Cannot be set if using a DigitalOcean DBaaS + OpenSearch cluster. + "user": "str" + # Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": + "str", # Optional. The name of a DigitalOcean DBaaS + OpenSearch cluster to use as a log forwarding + destination. Cannot be specified if ``endpoint`` is + also specified. + "endpoint": "str", # + Optional. 
OpenSearch API Endpoint. Only HTTPS is + supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": "logs" + # Optional. Default value is "logs". The index name + to use for the logs. If not set, the default index + name is "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. Must be + unique across all components within the same app. + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str", # Optional. An optional + path to the working directory to use for the build. For + Dockerfile builds, this will be used as the build context. Must + be relative to the root of the repo. + "termination": { + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which causes + immediate shutdown. (Default 120). + } + } + ], + "maintenance": { + "archive": bool, # Optional. Indicates whether the + app should be archived. Setting this to true implies that enabled is + set to true. + "enabled": bool, # Optional. Indicates whether + maintenance mode should be enabled for the app. + "offline_page_url": "str" # Optional. A custom + offline page to display when maintenance mode is enabled or the app + is archived. + }, + "region": "str", # Optional. The slug form of the + geographical origin of the app. Default: ``nearest available``. Known + values are: "atl", "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", + "sgp", and "syd". + "services": [ + { + "autoscaling": { + "max_instance_count": 0, # Optional. + The maximum amount of instances for this component. Must be + more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # + Optional. Default value is 80. The average target CPU + utilization for the component. + } + }, + "min_instance_count": 0 # Optional. 
+ The minimum amount of instances for this component. Must be + less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An + optional build command to run while building this component from + source. + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to the + client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. The set of + allowed HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # Optional. The set of + allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": "str", # + Optional. Exact string match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "prefix": "str", # + Optional. Prefix-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "regex": "str" # + Optional. RE2 style regex-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. For more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. The set of + HTTP response headers that browsers are allowed to + access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. An + optional duration specifying how long browsers can cache the + results of a preflight request. This configures the + ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # Optional. 
The + path to the Dockerfile relative to the root of the repo. If set, + it will be used to build this component. Otherwise, App Platform + will attempt to build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full + list, please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable + name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at + run-time * BUILD_TIME: Made available only at build-time + * RUN_AND_BUILD_TIME: Made available at both build and + run-time. Known values are: "UNSET", "RUN_TIME", + "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # Optional. + The value. If the type is ``SECRET``"" , the value will + be encrypted on first submission. On following + submissions, the encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The + name of the branch to use. + "repo_clone_url": "str" # Optional. + The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. 
+ }, + "health_check": { + "failure_threshold": 0, # Optional. + The number of failed health checks before considered + unhealthy. + "http_path": "str", # Optional. The + route path used for the HTTP health check ping. If not set, + the HTTP health check will be disabled and a TCP health check + used instead. + "initial_delay_seconds": 0, # + Optional. The number of seconds to wait before beginning + health checks. + "period_seconds": 0, # Optional. The + number of seconds to wait between health checks. + "port": 0, # Optional. The port on + which the health check will be performed. If not set, the + health check will be performed on the component's http_port. + "success_threshold": 0, # Optional. + The number of successful health checks before considered + healthy. + "timeout_seconds": 0 # Optional. The + number of seconds after which the check times out. + }, + "http_port": 0, # Optional. The internal + port on which this service's run command will listen. Default: + 8080 If there is not an environment variable with the name + ``PORT``"" , one will be automatically added with its value set + to the value of this field. + "image": { + "deploy_on_push": { + "enabled": bool # Optional. + Whether to automatically deploy new images. Can only be + used for images hosted in DOCR and can only be used with + an image tag, not a specific digest. + }, + "digest": "str", # Optional. The + image digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The + registry name. Must be left empty for the ``DOCR`` registry + type. + "registry_credentials": "str", # + Optional. The credentials to be able to pull the image. The + value will be encrypted on first submission. On following + submissions, the encrypted value should be used. * + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for registries of + type ``GHCR``. + "registry_type": "str", # Optional. + * DOCKER_HUB: The DockerHub container registry type. 
* DOCR: + The DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default + value is "latest". The repository tag. Defaults to ``latest`` + if not provided and no digest is provided. Cannot be + specified if digest is provided. + }, + "instance_count": 1, # Optional. Default + value is 1. The amount of instances that this component should be + scaled to. Default: 1. Must not be set if autoscaling is used. + "instance_size_slug": {}, + "internal_ports": [ + 0 # Optional. The ports on which + this service will listen for internal traffic. + ], + "liveness_health_check": { + "failure_threshold": 0, # Optional. + The number of failed health checks before considered + unhealthy. + "http_path": "str", # Optional. The + route path used for the HTTP health check ping. If not set, + the HTTP health check will be disabled and a TCP health check + used instead. + "initial_delay_seconds": 0, # + Optional. The number of seconds to wait before beginning + health checks. + "period_seconds": 0, # Optional. The + number of seconds to wait between health checks. + "port": 0, # Optional. The port on + which the health check will be performed. + "success_threshold": 0, # Optional. + The number of successful health checks before considered + healthy. + "timeout_seconds": 0 # Optional. The + number of seconds after which the check times out. + }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # + Datadog API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # + Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": + "str", # Optional. Password for user defined in + User. Is required when ``endpoint`` is set. + Cannot be set if using a DigitalOcean DBaaS + OpenSearch cluster. 
+ "user": "str" + # Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": + "str", # Optional. The name of a DigitalOcean DBaaS + OpenSearch cluster to use as a log forwarding + destination. Cannot be specified if ``endpoint`` is + also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": "logs" + # Optional. Default value is "logs". The index name + to use for the logs. If not set, the default index + name is "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. Must be + unique across all components within the same app. + "protocol": "str", # Optional. The protocol + which the service uses to serve traffic on the http_port. * + ``HTTP``"" : The app is serving the HTTP protocol. Default. * + ``HTTP2``"" : The app is serving the HTTP/2 protocol. Currently, + this needs to be implemented in the service by serving HTTP/2 + cleartext (h2c). Known values are: "HTTP" and "HTTP2". + "routes": [ + { + "path": "str", # Optional. + (Deprecated - Use Ingress Rules instead). An HTTP path + prefix. Paths must start with / and must be unique across + all components within an app. + "preserve_path_prefix": bool + # Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded + to the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` trimmed + to ``/list``. If this value is ``true``"" , the path will + remain ``/api/list``. + } + ], + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str", # Optional. 
An optional + path to the working directory to use for the build. For + Dockerfile builds, this will be used as the build context. Must + be relative to the root of the repo. + "termination": { + "drain_seconds": 0, # Optional. The + number of seconds to wait between selecting a container + instance for termination and issuing the TERM signal. + Selecting a container instance for termination begins an + asynchronous drain of new requests on upstream + load-balancers. (Default 15). + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which causes + immediate shutdown. (Default 120). + } + } + ], + "static_sites": [ + { + "bitbucket": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An + optional build command to run while building this component from + source. + "catchall_document": "str", # Optional. The + name of the document to use as the fallback for any requests to + documents that are not found when serving this static site. Only + 1 of ``catchall_document`` or ``error_document`` can be set. + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to the + client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. The set of + allowed HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # Optional. The set of + allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": "str", # + Optional. 
Exact string match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "prefix": "str", # + Optional. Prefix-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "regex": "str" # + Optional. RE2 style regex-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. For more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. The set of + HTTP response headers that browsers are allowed to + access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. An + optional duration specifying how long browsers can cache the + results of a preflight request. This configures the + ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # Optional. The + path to the Dockerfile relative to the root of the repo. If set, + it will be used to build this component. Otherwise, App Platform + will attempt to build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full + list, please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable + name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at + run-time * BUILD_TIME: Made available only at build-time + * RUN_AND_BUILD_TIME: Made available at both build and + run-time. Known values are: "UNSET", "RUN_TIME", + "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # Optional. + The value. If the type is ``SECRET``"" , the value will + be encrypted on first submission. On following + submissions, the encrypted value should be used. 
+ } + ], + "error_document": "404.html", # Optional. + Default value is "404.html". The name of the error document to + use when serving this static site. Default: 404.html. If no such + file exists within the built assets, App Platform will supply + one. + "git": { + "branch": "str", # Optional. The + name of the branch to use. + "repo_clone_url": "str" # Optional. + The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # Optional. + Whether to automatically deploy new images. Can only be + used for images hosted in DOCR and can only be used with + an image tag, not a specific digest. + }, + "digest": "str", # Optional. The + image digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The + registry name. Must be left empty for the ``DOCR`` registry + type. + "registry_credentials": "str", # + Optional. The credentials to be able to pull the image. The + value will be encrypted on first submission. On following + submissions, the encrypted value should be used. * + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for registries of + type ``GHCR``. + "registry_type": "str", # Optional. + * DOCKER_HUB: The DockerHub container registry type. * DOCR: + The DigitalOcean container registry type. 
* GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default + value is "latest". The repository tag. Defaults to ``latest`` + if not provided and no digest is provided. Cannot be + specified if digest is provided. + }, + "index_document": "index.html", # Optional. + Default value is "index.html". The name of the index document to + use when serving this static site. Default: index.html. + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # + Datadog API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # + Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": + "str", # Optional. Password for user defined in + User. Is required when ``endpoint`` is set. + Cannot be set if using a DigitalOcean DBaaS + OpenSearch cluster. + "user": "str" + # Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": + "str", # Optional. The name of a DigitalOcean DBaaS + OpenSearch cluster to use as a log forwarding + destination. Cannot be specified if ``endpoint`` is + also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": "logs" + # Optional. Default value is "logs". The index name + to use for the logs. If not set, the default index + name is "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. Must be + unique across all components within the same app. + "output_dir": "str", # Optional. 
An optional + path to where the built assets will be located, relative to the + build context. If not set, App Platform will automatically scan + for these directory names: ``_static``"" , ``dist``"" , + ``public``"" , ``build``. + "routes": [ + { + "path": "str", # Optional. + (Deprecated - Use Ingress Rules instead). An HTTP path + prefix. Paths must start with / and must be unique across + all components within an app. + "preserve_path_prefix": bool + # Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded + to the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` trimmed + to ``/list``. If this value is ``true``"" , the path will + remain ``/api/list``. + } + ], + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str" # Optional. An optional + path to the working directory to use for the build. For + Dockerfile builds, this will be used as the build context. Must + be relative to the root of the repo. + } + ], + "vpc": { + "egress_ips": [ + { + "ip": "str" # Optional. The egress + ips associated with the VPC. + } + ], + "id": "str" # Optional. The ID of the VPC. + }, + "workers": [ + { + "autoscaling": { + "max_instance_count": 0, # Optional. + The maximum amount of instances for this component. Must be + more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # + Optional. Default value is 80. The average target CPU + utilization for the component. + } + }, + "min_instance_count": 0 # Optional. + The minimum amount of instances for this component. Must be + less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. 
The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An + optional build command to run while building this component from + source. + "dockerfile_path": "str", # Optional. The + path to the Dockerfile relative to the root of the repo. If set, + it will be used to build this component. Otherwise, App Platform + will attempt to build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full + list, please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable + name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at + run-time * BUILD_TIME: Made available only at build-time + * RUN_AND_BUILD_TIME: Made available at both build and + run-time. Known values are: "UNSET", "RUN_TIME", + "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # Optional. + The value. If the type is ``SECRET``"" , the value will + be encrypted on first submission. On following + submissions, the encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The + name of the branch to use. + "repo_clone_url": "str" # Optional. + The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The + name of the branch to use. 
+ "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # Optional. + Whether to automatically deploy new images. Can only be + used for images hosted in DOCR and can only be used with + an image tag, not a specific digest. + }, + "digest": "str", # Optional. The + image digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The + registry name. Must be left empty for the ``DOCR`` registry + type. + "registry_credentials": "str", # + Optional. The credentials to be able to pull the image. The + value will be encrypted on first submission. On following + submissions, the encrypted value should be used. * + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for registries of + type ``GHCR``. + "registry_type": "str", # Optional. + * DOCKER_HUB: The DockerHub container registry type. * DOCR: + The DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default + value is "latest". The repository tag. Defaults to ``latest`` + if not provided and no digest is provided. Cannot be + specified if digest is provided. + }, + "instance_count": 1, # Optional. Default + value is 1. The amount of instances that this component should be + scaled to. Default: 1. Must not be set if autoscaling is used. + "instance_size_slug": {}, + "liveness_health_check": { + "failure_threshold": 0, # Optional. + The number of failed health checks before considered + unhealthy. + "http_path": "str", # Optional. The + route path used for the HTTP health check ping. 
If not set, + the HTTP health check will be disabled and a TCP health check + used instead. + "initial_delay_seconds": 0, # + Optional. The number of seconds to wait before beginning + health checks. + "period_seconds": 0, # Optional. The + number of seconds to wait between health checks. + "port": 0, # Optional. The port on + which the health check will be performed. + "success_threshold": 0, # Optional. + The number of successful health checks before considered + healthy. + "timeout_seconds": 0 # Optional. The + number of seconds after which the check times out. + }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # + Datadog API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # + Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": + "str", # Optional. Password for user defined in + User. Is required when ``endpoint`` is set. + Cannot be set if using a DigitalOcean DBaaS + OpenSearch cluster. + "user": "str" + # Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": + "str", # Optional. The name of a DigitalOcean DBaaS + OpenSearch cluster to use as a log forwarding + destination. Cannot be specified if ``endpoint`` is + also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": "logs" + # Optional. Default value is "logs". The index name + to use for the logs. If not set, the default index + name is "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. Must be + unique across all components within the same app. + "run_command": "str", # Optional. 
An + optional run command to override the component's default. + "source_dir": "str", # Optional. An optional + path to the working directory to use for the build. For + Dockerfile builds, this will be used as the build context. Must + be relative to the root of the repo. + "termination": { + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which causes + immediate shutdown. (Default 120). + } + } + ] + }, + "static_sites": [ + { + "name": "str", # Optional. The name of this static + site. + "source_commit_hash": "str" # Optional. The commit + hash of the repository that was used to build this static site. + } + ], + "tier_slug": "str", # Optional. The current pricing tier slug of the + deployment. + "updated_at": "2020-02-20 00:00:00", # Optional. When the deployment + was last updated. + "workers": [ + { + "name": "str", # Optional. The name of this worker. + "source_commit_hash": "str" # Optional. The commit + hash of the repository that was used to build this worker. + } + ] + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_apps_create_rollback_request( + app_id=app_id, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + 
else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def validate_rollback( + self, + app_id: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Validate App Rollback. + + Check whether an app can be rolled back to a specific deployment. This endpoint can also be + used + to check if there are any warnings or validation conditions that will cause the rollback to + proceed + under unideal circumstances. For example, if a component must be rebuilt as part of the + rollback + causing it to take longer than usual. + + :param app_id: The app ID. Required. + :type app_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "deployment_id": "str", # Optional. The ID of the deployment to rollback to. + "skip_pin": bool # Optional. Whether to skip pinning the rollback + deployment. 
If false, the rollback deployment will be pinned and any new + deployments including Auto Deploy on Push hooks will be disabled until the + rollback is either manually committed or reverted via the CommitAppRollback or + RevertAppRollback endpoints respectively. If true, the rollback will be + immediately committed and the app will remain unpinned. + } + + # response body for status code(s): 200 + response == { + "error": { + "code": "str", # Optional. A code identifier that represents the + failing condition. Failing conditions: * ``incompatible_phase`` - + indicates that the deployment's phase is not suitable for rollback. * + ``incompatible_result`` - indicates that the deployment's result is not + suitable for rollback. * ``exceeded_revision_limit`` - indicates that the app + has exceeded the rollback revision limits for its tier. * ``app_pinned`` - + indicates that there is already a rollback in progress and the app is pinned. + * ``database_config_conflict`` - indicates that the deployment's database + config is different than the current config. * ``region_conflict`` - + indicates that the deployment's region differs from the current app region. + Warning conditions: * ``static_site_requires_rebuild`` - indicates that the + deployment contains at least one static site that will require a rebuild. * + ``image_source_missing_digest`` - indicates that the deployment contains at + least one component with an image source that is missing a digest. Known + values are: "incompatible_phase", "incompatible_result", + "exceeded_revision_limit", "app_pinned", "database_config_conflict", + "region_conflict", "static_site_requires_rebuild", and + "image_source_missing_digest". + "components": [ + "str" # Optional. + ], + "message": "str" # Optional. A human-readable message describing the + failing condition. + }, + "valid": bool, # Optional. Indicates whether the app can be rolled back to + the specified deployment. + "warnings": [ + { + "code": "str", # Optional. 
A code identifier that represents + the failing condition. Failing conditions: * ``incompatible_phase`` - + indicates that the deployment's phase is not suitable for rollback. * + ``incompatible_result`` - indicates that the deployment's result is not + suitable for rollback. * ``exceeded_revision_limit`` - indicates that the + app has exceeded the rollback revision limits for its tier. * + ``app_pinned`` - indicates that there is already a rollback in progress + and the app is pinned. * ``database_config_conflict`` - indicates that + the deployment's database config is different than the current config. * + ``region_conflict`` - indicates that the deployment's region differs from + the current app region. Warning conditions: * + ``static_site_requires_rebuild`` - indicates that the deployment contains + at least one static site that will require a rebuild. * + ``image_source_missing_digest`` - indicates that the deployment contains + at least one component with an image source that is missing a digest. + Known values are: "incompatible_phase", "incompatible_result", + "exceeded_revision_limit", "app_pinned", "database_config_conflict", + "region_conflict", "static_site_requires_rebuild", and + "image_source_missing_digest". + "components": [ + "str" # Optional. Contains a list of warnings that + may cause the rollback to run under unideal circumstances. + ], + "message": "str" # Optional. A human-readable message + describing the failing condition. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def validate_rollback( + self, + app_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Validate App Rollback. + + Check whether an app can be rolled back to a specific deployment. This endpoint can also be + used + to check if there are any warnings or validation conditions that will cause the rollback to + proceed + under unideal circumstances. For example, if a component must be rebuilt as part of the + rollback + causing it to take longer than usual. + + :param app_id: The app ID. Required. + :type app_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "error": { + "code": "str", # Optional. A code identifier that represents the + failing condition. Failing conditions: * ``incompatible_phase`` - + indicates that the deployment's phase is not suitable for rollback. * + ``incompatible_result`` - indicates that the deployment's result is not + suitable for rollback. * ``exceeded_revision_limit`` - indicates that the app + has exceeded the rollback revision limits for its tier. * ``app_pinned`` - + indicates that there is already a rollback in progress and the app is pinned. + * ``database_config_conflict`` - indicates that the deployment's database + config is different than the current config. * ``region_conflict`` - + indicates that the deployment's region differs from the current app region. 
+ Warning conditions: * ``static_site_requires_rebuild`` - indicates that the + deployment contains at least one static site that will require a rebuild. * + ``image_source_missing_digest`` - indicates that the deployment contains at + least one component with an image source that is missing a digest. Known + values are: "incompatible_phase", "incompatible_result", + "exceeded_revision_limit", "app_pinned", "database_config_conflict", + "region_conflict", "static_site_requires_rebuild", and + "image_source_missing_digest". + "components": [ + "str" # Optional. + ], + "message": "str" # Optional. A human-readable message describing the + failing condition. + }, + "valid": bool, # Optional. Indicates whether the app can be rolled back to + the specified deployment. + "warnings": [ + { + "code": "str", # Optional. A code identifier that represents + the failing condition. Failing conditions: * ``incompatible_phase`` - + indicates that the deployment's phase is not suitable for rollback. * + ``incompatible_result`` - indicates that the deployment's result is not + suitable for rollback. * ``exceeded_revision_limit`` - indicates that the + app has exceeded the rollback revision limits for its tier. * + ``app_pinned`` - indicates that there is already a rollback in progress + and the app is pinned. * ``database_config_conflict`` - indicates that + the deployment's database config is different than the current config. * + ``region_conflict`` - indicates that the deployment's region differs from + the current app region. Warning conditions: * + ``static_site_requires_rebuild`` - indicates that the deployment contains + at least one static site that will require a rebuild. * + ``image_source_missing_digest`` - indicates that the deployment contains + at least one component with an image source that is missing a digest. 
+ Known values are: "incompatible_phase", "incompatible_result", + "exceeded_revision_limit", "app_pinned", "database_config_conflict", + "region_conflict", "static_site_requires_rebuild", and + "image_source_missing_digest". + "components": [ + "str" # Optional. Contains a list of warnings that + may cause the rollback to run under unideal circumstances. + ], + "message": "str" # Optional. A human-readable message + describing the failing condition. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def validate_rollback( + self, app_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Validate App Rollback. + + Check whether an app can be rolled back to a specific deployment. This endpoint can also be + used + to check if there are any warnings or validation conditions that will cause the rollback to + proceed + under unideal circumstances. For example, if a component must be rebuilt as part of the + rollback + causing it to take longer than usual. + + :param app_id: The app ID. Required. + :type app_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "deployment_id": "str", # Optional. 
The ID of the deployment to rollback to. + "skip_pin": bool # Optional. Whether to skip pinning the rollback + deployment. If false, the rollback deployment will be pinned and any new + deployments including Auto Deploy on Push hooks will be disabled until the + rollback is either manually committed or reverted via the CommitAppRollback or + RevertAppRollback endpoints respectively. If true, the rollback will be + immediately committed and the app will remain unpinned. + } + + # response body for status code(s): 200 + response == { + "error": { + "code": "str", # Optional. A code identifier that represents the + failing condition. Failing conditions: * ``incompatible_phase`` - + indicates that the deployment's phase is not suitable for rollback. * + ``incompatible_result`` - indicates that the deployment's result is not + suitable for rollback. * ``exceeded_revision_limit`` - indicates that the app + has exceeded the rollback revision limits for its tier. * ``app_pinned`` - + indicates that there is already a rollback in progress and the app is pinned. + * ``database_config_conflict`` - indicates that the deployment's database + config is different than the current config. * ``region_conflict`` - + indicates that the deployment's region differs from the current app region. + Warning conditions: * ``static_site_requires_rebuild`` - indicates that the + deployment contains at least one static site that will require a rebuild. * + ``image_source_missing_digest`` - indicates that the deployment contains at + least one component with an image source that is missing a digest. Known + values are: "incompatible_phase", "incompatible_result", + "exceeded_revision_limit", "app_pinned", "database_config_conflict", + "region_conflict", "static_site_requires_rebuild", and + "image_source_missing_digest". + "components": [ + "str" # Optional. + ], + "message": "str" # Optional. A human-readable message describing the + failing condition. + }, + "valid": bool, # Optional. 
Indicates whether the app can be rolled back to + the specified deployment. + "warnings": [ + { + "code": "str", # Optional. A code identifier that represents + the failing condition. Failing conditions: * ``incompatible_phase`` - + indicates that the deployment's phase is not suitable for rollback. * + ``incompatible_result`` - indicates that the deployment's result is not + suitable for rollback. * ``exceeded_revision_limit`` - indicates that the + app has exceeded the rollback revision limits for its tier. * + ``app_pinned`` - indicates that there is already a rollback in progress + and the app is pinned. * ``database_config_conflict`` - indicates that + the deployment's database config is different than the current config. * + ``region_conflict`` - indicates that the deployment's region differs from + the current app region. Warning conditions: * + ``static_site_requires_rebuild`` - indicates that the deployment contains + at least one static site that will require a rebuild. * + ``image_source_missing_digest`` - indicates that the deployment contains + at least one component with an image source that is missing a digest. + Known values are: "incompatible_phase", "incompatible_result", + "exceeded_revision_limit", "app_pinned", "database_config_conflict", + "region_conflict", "static_site_requires_rebuild", and + "image_source_missing_digest". + "components": [ + "str" # Optional. Contains a list of warnings that + may cause the rollback to run under unideal circumstances. + ], + "message": "str" # Optional. A human-readable message + describing the failing condition. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. 
+ "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_apps_validate_rollback_request( + app_id=app_id, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", 
response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def commit_rollback(self, app_id: str, **kwargs: Any) -> Optional[JSON]: + # pylint: disable=line-too-long + """Commit App Rollback. + + Commit an app rollback. This action permanently applies the rollback and unpins the app to + resume new deployments. + + :param app_id: The app ID. Required. + :type app_id: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_apps_commit_rollback_request( + app_id=app_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + 
if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def revert_rollback(self, app_id: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Revert App Rollback. + + Revert an app rollback. This action reverts the active rollback by creating a new deployment + from the + latest app spec prior to the rollback and unpins the app to resume new deployments. + + :param app_id: The app ID. Required. + :type app_id: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "deployment": { + "cause": "str", # Optional. What caused this deployment to be + created. + "cloned_from": "str", # Optional. The ID of a previous deployment + that this deployment was cloned from. + "created_at": "2020-02-20 00:00:00", # Optional. The creation time + of the deployment. + "functions": [ + { + "name": "str", # Optional. The name of this + functions component. + "namespace": "str", # Optional. The namespace where + the functions are deployed. + "source_commit_hash": "str" # Optional. The commit + hash of the repository that was used to build this functions + component. + } + ], + "id": "str", # Optional. The ID of the deployment. + "jobs": [ + { + "name": "str", # Optional. The name of this job. + "source_commit_hash": "str" # Optional. The commit + hash of the repository that was used to build this job. + } + ], + "phase": "UNKNOWN", # Optional. Default value is "UNKNOWN". Known + values are: "UNKNOWN", "PENDING_BUILD", "BUILDING", "PENDING_DEPLOY", + "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", and "CANCELED". + "phase_last_updated_at": "2020-02-20 00:00:00", # Optional. When the + deployment phase was last updated. 
+ "progress": { + "error_steps": 0, # Optional. Number of unsuccessful steps. + "pending_steps": 0, # Optional. Number of pending steps. + "running_steps": 0, # Optional. Number of currently running + steps. + "steps": [ + { + "component_name": "str", # Optional. The + component name that this step is associated with. + "ended_at": "2020-02-20 00:00:00", # + Optional. The end time of this step. + "message_base": "str", # Optional. The base + of a human-readable description of the step intended to be + combined with the component name for presentation. For example: + ``message_base`` = "Building service" ``component_name`` = "api". + "name": "str", # Optional. The name of this + step. + "reason": { + "code": "str", # Optional. The error + code. + "message": "str" # Optional. The + error message. + }, + "started_at": "2020-02-20 00:00:00", # + Optional. The start time of this step. + "status": "UNKNOWN", # Optional. Default + value is "UNKNOWN". Known values are: "UNKNOWN", "PENDING", + "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. Child steps of this + step. + ] + } + ], + "success_steps": 0, # Optional. Number of successful steps. + "summary_steps": [ + { + "component_name": "str", # Optional. The + component name that this step is associated with. + "ended_at": "2020-02-20 00:00:00", # + Optional. The end time of this step. + "message_base": "str", # Optional. The base + of a human-readable description of the step intended to be + combined with the component name for presentation. For example: + ``message_base`` = "Building service" ``component_name`` = "api". + "name": "str", # Optional. The name of this + step. + "reason": { + "code": "str", # Optional. The error + code. + "message": "str" # Optional. The + error message. + }, + "started_at": "2020-02-20 00:00:00", # + Optional. The start time of this step. + "status": "UNKNOWN", # Optional. Default + value is "UNKNOWN". 
Known values are: "UNKNOWN", "PENDING", + "RUNNING", "ERROR", and "SUCCESS". + "steps": [ + {} # Optional. Child steps of this + step. + ] + } + ], + "total_steps": 0 # Optional. Total number of steps. + }, + "services": [ + { + "name": "str", # Optional. The name of this service. + "source_commit_hash": "str" # Optional. The commit + hash of the repository that was used to build this service. + } + ], + "spec": { + "name": "str", # The name of the app. Must be unique across + all apps in the same account. Required. + "databases": [ + { + "name": "str", # The database's name. The + name must be unique across all components within the same app and + cannot use capital letters. Required. + "cluster_name": "str", # Optional. The name + of the underlying DigitalOcean DBaaS cluster. This is required + for production databases. For dev databases, if cluster_name is + not set, a new cluster will be provisioned. + "db_name": "str", # Optional. The name of + the MySQL or PostgreSQL database to configure. + "db_user": "str", # Optional. The name of + the MySQL or PostgreSQL user to configure. + "engine": "UNSET", # Optional. Default value + is "UNSET". * MYSQL: MySQL * PG: PostgreSQL * REDIS: Caching * + MONGODB: MongoDB * KAFKA: Kafka * OPENSEARCH: OpenSearch * + VALKEY: ValKey. Known values are: "UNSET", "MYSQL", "PG", + "REDIS", "MONGODB", "KAFKA", "OPENSEARCH", and "VALKEY". + "production": bool, # Optional. Whether this + is a production or dev database. + "version": "str" # Optional. The version of + the database engine. + } + ], + "disable_edge_cache": False, # Optional. Default value is + False. .. role:: raw-html-m2r(raw) :format: html If set to + ``true``"" , the app will **not** be cached at the edge (CDN). Enable + this option if you want to manage CDN configuration yourself"u2014whether + by using an external CDN provider or by handling static content and + caching within your app. 
This setting is also recommended for apps that + require real-time data or serve dynamic content, such as those using + Server-Sent Events (SSE) over GET, or hosting an MCP (Model Context + Protocol) Server that utilizes SSE."" :raw-html-m2r:`
` **Note:** This + feature is not available for static site components."" + :raw-html-m2r:`
` For more information, see `Disable CDN Cache + `_. + "disable_email_obfuscation": False, # Optional. Default + value is False. If set to ``true``"" , email addresses in the app will + not be obfuscated. This is useful for apps that require email addresses + to be visible (in the HTML markup). + "domains": [ + { + "domain": "str", # The hostname for the + domain. Required. + "minimum_tls_version": "str", # Optional. + The minimum version of TLS a client application can use to access + resources for the domain. Must be one of the following values + wrapped within quotations: ``"1.2"`` or ``"1.3"``. Known values + are: "1.2" and "1.3". + "type": "UNSPECIFIED", # Optional. Default + value is "UNSPECIFIED". * DEFAULT: The default + ``.ondigitalocean.app`` domain assigned to this app * PRIMARY: + The primary domain for this app that is displayed as the default + in the control panel, used in bindable environment variables, and + any other places that reference an app's live URL. Only one + domain may be set as primary. * ALIAS: A non-primary domain. + Known values are: "UNSPECIFIED", "DEFAULT", "PRIMARY", and + "ALIAS". + "wildcard": bool, # Optional. Indicates + whether the domain includes all sub-domains, in addition to the + given domain. + "zone": "str" # Optional. Optional. If the + domain uses DigitalOcean DNS and you would like App Platform to + automatically manage it for you, set this to the name of the + domain on your account. For example, If the domain you are + adding is ``app.domain.com``"" , the zone could be + ``domain.com``. + } + ], + "egress": { + "type": "AUTOASSIGN" # Optional. Default value is + "AUTOASSIGN". The app egress type. Known values are: "AUTOASSIGN" and + "DEDICATED_IP". + }, + "enhanced_threat_control_enabled": False, # Optional. + Default value is False. If set to ``true``"" , suspicious requests will + go through additional security checks to help mitigate layer 7 DDoS + attacks. + "functions": [ + { + "name": "str", # The name. 
Must be unique + across all components within the same app. Required. + "alerts": [ + { + "disabled": bool, # + Optional. Is the alert disabled?. + "operator": + "UNSPECIFIED_OPERATOR", # Optional. Default value is + "UNSPECIFIED_OPERATOR". Known values are: + "UNSPECIFIED_OPERATOR", "GREATER_THAN", and "LESS_THAN". + "rule": "UNSPECIFIED_RULE", + # Optional. Default value is "UNSPECIFIED_RULE". Known + values are: "UNSPECIFIED_RULE", "CPU_UTILIZATION", + "MEM_UTILIZATION", "RESTART_COUNT", "DEPLOYMENT_FAILED", + "DEPLOYMENT_LIVE", "DOMAIN_FAILED", "DOMAIN_LIVE", + "AUTOSCALE_FAILED", "AUTOSCALE_SUCCEEDED", + "FUNCTIONS_ACTIVATION_COUNT", + "FUNCTIONS_AVERAGE_DURATION_MS", + "FUNCTIONS_ERROR_RATE_PER_MINUTE", + "FUNCTIONS_AVERAGE_WAIT_TIME_MS", + "FUNCTIONS_ERROR_COUNT", and + "FUNCTIONS_GB_RATE_PER_SECOND". + "value": 0.0, # Optional. + Threshold value for alert. + "window": + "UNSPECIFIED_WINDOW" # Optional. Default value is + "UNSPECIFIED_WINDOW". Known values are: + "UNSPECIFIED_WINDOW", "FIVE_MINUTES", "TEN_MINUTES", + "THIRTY_MINUTES", and "ONE_HOUR". + } + ], + "bitbucket": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to the + client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. The set of + allowed HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # Optional. The set of + allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. 
+ ], + "allow_origins": [ + { + "exact": "str", # + Optional. Exact string match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "prefix": "str", # + Optional. Prefix-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "regex": "str" # + Optional. RE2 style regex-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. For more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. The set of + HTTP response headers that browsers are allowed to + access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. An + optional duration specifying how long browsers can cache the + results of a preflight request. This configures the + ``Access-Control-Max-Age`` header. + }, + "envs": [ + { + "key": "str", # The variable + name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at + run-time * BUILD_TIME: Made available only at build-time + * RUN_AND_BUILD_TIME: Made available at both build and + run-time. Known values are: "UNSET", "RUN_TIME", + "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # Optional. + The value. If the type is ``SECRET``"" , the value will + be encrypted on first submission. On following + submissions, the encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The + name of the branch to use. + "repo_clone_url": "str" # Optional. + The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The + name of the branch to use. 
+ "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # + Datadog API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # + Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": + "str", # Optional. Password for user defined in + User. Is required when ``endpoint`` is set. + Cannot be set if using a DigitalOcean DBaaS + OpenSearch cluster. + "user": "str" + # Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": + "str", # Optional. The name of a DigitalOcean DBaaS + OpenSearch cluster to use as a log forwarding + destination. Cannot be specified if ``endpoint`` is + also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": "logs" + # Optional. Default value is "logs". The index name + to use for the logs. If not set, the default index + name is "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "routes": [ + { + "path": "str", # Optional. + (Deprecated - Use Ingress Rules instead). An HTTP path + prefix. Paths must start with / and must be unique across + all components within an app. 
+ "preserve_path_prefix": bool + # Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded + to the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` trimmed + to ``/list``. If this value is ``true``"" , the path will + remain ``/api/list``. + } + ], + "source_dir": "str" # Optional. An optional + path to the working directory to use for the build. For + Dockerfile builds, this will be used as the build context. Must + be relative to the root of the repo. + } + ], + "ingress": { + "rules": [ + { + "component": { + "name": "str", # The name of + the component to route to. Required. + "preserve_path_prefix": + "str", # Optional. An optional flag to preserve the path + that is forwarded to the backend service. By default, the + HTTP request path will be trimmed from the left when + forwarded to the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` trimmed + to ``/list``. If this value is ``true``"" , the path will + remain ``/api/list``. Note: this is not applicable for + Functions Components and is mutually exclusive with + ``rewrite``. + "rewrite": "str" # Optional. + An optional field that will rewrite the path of the + component to be what is specified here. By default, the + HTTP request path will be trimmed from the left when + forwarded to the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` trimmed + to ``/list``. If you specified the rewrite to be + ``/v1/``"" , requests to ``/api/list`` would be rewritten + to ``/v1/list``. Note: this is mutually exclusive with + ``preserve_path_prefix``. + }, + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to + the client-side JavaScript code when the request"u2019s + credentials mode is include. 
This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. + The set of allowed HTTP request headers. This + configures the ``Access-Control-Allow-Headers`` + header. + ], + "allow_methods": [ + "str" # Optional. + The set of allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": + "str", # Optional. Exact string match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "prefix": + "str", # Optional. Prefix-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. + "regex": + "str" # Optional. RE2 style regex-based match. + Only 1 of ``exact``"" , ``prefix``"" , or + ``regex`` must be set. For more information about + RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. + The set of HTTP response headers that browsers are + allowed to access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. + An optional duration specifying how long browsers can + cache the results of a preflight request. This configures + the ``Access-Control-Max-Age`` header. + }, + "match": { + "authority": { + "exact": "str" # + Required. + }, + "path": { + "prefix": "str" # + Prefix-based match. For example, ``/api`` will match + ``/api``"" , ``/api/``"" , and any nested paths such + as ``/api/v1/endpoint``. Required. + } + }, + "redirect": { + "authority": "str", # + Optional. The authority/host to redirect to. This can be + a hostname or IP address. Note: use ``port`` to set the + port. + "port": 0, # Optional. The + port to redirect to. + "redirect_code": 0, # + Optional. The redirect code to use. Defaults to ``302``. + Supported values are 300, 301, 302, 303, 304, 307, 308. + "scheme": "str", # Optional. + The scheme to redirect to. Supported values are ``http`` + or ``https``. Default: ``https``. 
+ "uri": "str" # Optional. An + optional URI path to redirect to. Note: if this is + specified the whole URI of the original request will be + overwritten to this value, irrespective of the original + request URI being matched. + } + } + ] + }, + "jobs": [ + { + "autoscaling": { + "max_instance_count": 0, # Optional. + The maximum amount of instances for this component. Must be + more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # + Optional. Default value is 80. The average target CPU + utilization for the component. + } + }, + "min_instance_count": 0 # Optional. + The minimum amount of instances for this component. Must be + less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An + optional build command to run while building this component from + source. + "dockerfile_path": "str", # Optional. The + path to the Dockerfile relative to the root of the repo. If set, + it will be used to build this component. Otherwise, App Platform + will attempt to build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full + list, please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable + name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at + run-time * BUILD_TIME: Made available only at build-time + * RUN_AND_BUILD_TIME: Made available at both build and + run-time. Known values are: "UNSET", "RUN_TIME", + "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". 
* GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # Optional. + The value. If the type is ``SECRET``"" , the value will + be encrypted on first submission. On following + submissions, the encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The + name of the branch to use. + "repo_clone_url": "str" # Optional. + The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # Optional. + Whether to automatically deploy new images. Can only be + used for images hosted in DOCR and can only be used with + an image tag, not a specific digest. + }, + "digest": "str", # Optional. The + image digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The + registry name. Must be left empty for the ``DOCR`` registry + type. + "registry_credentials": "str", # + Optional. The credentials to be able to pull the image. The + value will be encrypted on first submission. On following + submissions, the encrypted value should be used. * + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for registries of + type ``GHCR``. + "registry_type": "str", # Optional. 
+ * DOCKER_HUB: The DockerHub container registry type. * DOCR: + The DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default + value is "latest". The repository tag. Defaults to ``latest`` + if not provided and no digest is provided. Cannot be + specified if digest is provided. + }, + "instance_count": 1, # Optional. Default + value is 1. The amount of instances that this component should be + scaled to. Default: 1. Must not be set if autoscaling is used. + "instance_size_slug": {}, + "kind": "UNSPECIFIED", # Optional. Default + value is "UNSPECIFIED". * UNSPECIFIED: Default job type, will + auto-complete to POST_DEPLOY kind. * PRE_DEPLOY: Indicates a job + that runs before an app deployment. * POST_DEPLOY: Indicates a + job that runs after an app deployment. * FAILED_DEPLOY: Indicates + a job that runs after a component fails to deploy. Known values + are: "UNSPECIFIED", "PRE_DEPLOY", "POST_DEPLOY", and + "FAILED_DEPLOY". + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # + Datadog API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # + Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": + "str", # Optional. Password for user defined in + User. Is required when ``endpoint`` is set. + Cannot be set if using a DigitalOcean DBaaS + OpenSearch cluster. + "user": "str" + # Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": + "str", # Optional. The name of a DigitalOcean DBaaS + OpenSearch cluster to use as a log forwarding + destination. Cannot be specified if ``endpoint`` is + also specified. + "endpoint": "str", # + Optional. 
OpenSearch API Endpoint. Only HTTPS is + supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": "logs" + # Optional. Default value is "logs". The index name + to use for the logs. If not set, the default index + name is "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. Must be + unique across all components within the same app. + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str", # Optional. An optional + path to the working directory to use for the build. For + Dockerfile builds, this will be used as the build context. Must + be relative to the root of the repo. + "termination": { + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which causes + immediate shutdown. (Default 120). + } + } + ], + "maintenance": { + "archive": bool, # Optional. Indicates whether the + app should be archived. Setting this to true implies that enabled is + set to true. + "enabled": bool, # Optional. Indicates whether + maintenance mode should be enabled for the app. + "offline_page_url": "str" # Optional. A custom + offline page to display when maintenance mode is enabled or the app + is archived. + }, + "region": "str", # Optional. The slug form of the + geographical origin of the app. Default: ``nearest available``. Known + values are: "atl", "nyc", "sfo", "tor", "ams", "fra", "lon", "blr", + "sgp", and "syd". + "services": [ + { + "autoscaling": { + "max_instance_count": 0, # Optional. + The maximum amount of instances for this component. Must be + more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # + Optional. Default value is 80. The average target CPU + utilization for the component. + } + }, + "min_instance_count": 0 # Optional. 
+ The minimum amount of instances for this component. Must be + less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An + optional build command to run while building this component from + source. + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to the + client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. The set of + allowed HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # Optional. The set of + allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": "str", # + Optional. Exact string match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "prefix": "str", # + Optional. Prefix-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "regex": "str" # + Optional. RE2 style regex-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. For more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. The set of + HTTP response headers that browsers are allowed to + access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. An + optional duration specifying how long browsers can cache the + results of a preflight request. This configures the + ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # Optional. 
The + path to the Dockerfile relative to the root of the repo. If set, + it will be used to build this component. Otherwise, App Platform + will attempt to build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full + list, please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable + name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at + run-time * BUILD_TIME: Made available only at build-time + * RUN_AND_BUILD_TIME: Made available at both build and + run-time. Known values are: "UNSET", "RUN_TIME", + "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # Optional. + The value. If the type is ``SECRET``"" , the value will + be encrypted on first submission. On following + submissions, the encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The + name of the branch to use. + "repo_clone_url": "str" # Optional. + The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. 
+ }, + "health_check": { + "failure_threshold": 0, # Optional. + The number of failed health checks before considered + unhealthy. + "http_path": "str", # Optional. The + route path used for the HTTP health check ping. If not set, + the HTTP health check will be disabled and a TCP health check + used instead. + "initial_delay_seconds": 0, # + Optional. The number of seconds to wait before beginning + health checks. + "period_seconds": 0, # Optional. The + number of seconds to wait between health checks. + "port": 0, # Optional. The port on + which the health check will be performed. If not set, the + health check will be performed on the component's http_port. + "success_threshold": 0, # Optional. + The number of successful health checks before considered + healthy. + "timeout_seconds": 0 # Optional. The + number of seconds after which the check times out. + }, + "http_port": 0, # Optional. The internal + port on which this service's run command will listen. Default: + 8080 If there is not an environment variable with the name + ``PORT``"" , one will be automatically added with its value set + to the value of this field. + "image": { + "deploy_on_push": { + "enabled": bool # Optional. + Whether to automatically deploy new images. Can only be + used for images hosted in DOCR and can only be used with + an image tag, not a specific digest. + }, + "digest": "str", # Optional. The + image digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The + registry name. Must be left empty for the ``DOCR`` registry + type. + "registry_credentials": "str", # + Optional. The credentials to be able to pull the image. The + value will be encrypted on first submission. On following + submissions, the encrypted value should be used. * + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for registries of + type ``GHCR``. + "registry_type": "str", # Optional. + * DOCKER_HUB: The DockerHub container registry type. 
* DOCR: + The DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default + value is "latest". The repository tag. Defaults to ``latest`` + if not provided and no digest is provided. Cannot be + specified if digest is provided. + }, + "instance_count": 1, # Optional. Default + value is 1. The amount of instances that this component should be + scaled to. Default: 1. Must not be set if autoscaling is used. + "instance_size_slug": {}, + "internal_ports": [ + 0 # Optional. The ports on which + this service will listen for internal traffic. + ], + "liveness_health_check": { + "failure_threshold": 0, # Optional. + The number of failed health checks before considered + unhealthy. + "http_path": "str", # Optional. The + route path used for the HTTP health check ping. If not set, + the HTTP health check will be disabled and a TCP health check + used instead. + "initial_delay_seconds": 0, # + Optional. The number of seconds to wait before beginning + health checks. + "period_seconds": 0, # Optional. The + number of seconds to wait between health checks. + "port": 0, # Optional. The port on + which the health check will be performed. + "success_threshold": 0, # Optional. + The number of successful health checks before considered + healthy. + "timeout_seconds": 0 # Optional. The + number of seconds after which the check times out. + }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # + Datadog API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # + Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": + "str", # Optional. Password for user defined in + User. Is required when ``endpoint`` is set. + Cannot be set if using a DigitalOcean DBaaS + OpenSearch cluster. 
+ "user": "str" + # Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": + "str", # Optional. The name of a DigitalOcean DBaaS + OpenSearch cluster to use as a log forwarding + destination. Cannot be specified if ``endpoint`` is + also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": "logs" + # Optional. Default value is "logs". The index name + to use for the logs. If not set, the default index + name is "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. Must be + unique across all components within the same app. + "protocol": "str", # Optional. The protocol + which the service uses to serve traffic on the http_port. * + ``HTTP``"" : The app is serving the HTTP protocol. Default. * + ``HTTP2``"" : The app is serving the HTTP/2 protocol. Currently, + this needs to be implemented in the service by serving HTTP/2 + cleartext (h2c). Known values are: "HTTP" and "HTTP2". + "routes": [ + { + "path": "str", # Optional. + (Deprecated - Use Ingress Rules instead). An HTTP path + prefix. Paths must start with / and must be unique across + all components within an app. + "preserve_path_prefix": bool + # Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded + to the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` trimmed + to ``/list``. If this value is ``true``"" , the path will + remain ``/api/list``. + } + ], + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str", # Optional. 
An optional + path to the working directory to use for the build. For + Dockerfile builds, this will be used as the build context. Must + be relative to the root of the repo. + "termination": { + "drain_seconds": 0, # Optional. The + number of seconds to wait between selecting a container + instance for termination and issuing the TERM signal. + Selecting a container instance for termination begins an + asynchronous drain of new requests on upstream + load-balancers. (Default 15). + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which causes + immediate shutdown. (Default 120). + } + } + ], + "static_sites": [ + { + "bitbucket": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An + optional build command to run while building this component from + source. + "catchall_document": "str", # Optional. The + name of the document to use as the fallback for any requests to + documents that are not found when serving this static site. Only + 1 of ``catchall_document`` or ``error_document`` can be set. + "cors": { + "allow_credentials": bool, # + Optional. Whether browsers should expose the response to the + client-side JavaScript code when the request"u2019s + credentials mode is include. This configures the + ``Access-Control-Allow-Credentials`` header. + "allow_headers": [ + "str" # Optional. The set of + allowed HTTP request headers. This configures the + ``Access-Control-Allow-Headers`` header. + ], + "allow_methods": [ + "str" # Optional. The set of + allowed HTTP methods. This configures the + ``Access-Control-Allow-Methods`` header. + ], + "allow_origins": [ + { + "exact": "str", # + Optional. 
Exact string match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "prefix": "str", # + Optional. Prefix-based match. Only 1 of ``exact``"" , + ``prefix``"" , or ``regex`` must be set. + "regex": "str" # + Optional. RE2 style regex-based match. Only 1 of + ``exact``"" , ``prefix``"" , or ``regex`` must be + set. For more information about RE2 syntax, see: + https://github.com/google/re2/wiki/Syntax. + } + ], + "expose_headers": [ + "str" # Optional. The set of + HTTP response headers that browsers are allowed to + access. This configures the + ``Access-Control-Expose-Headers`` header. + ], + "max_age": "str" # Optional. An + optional duration specifying how long browsers can cache the + results of a preflight request. This configures the + ``Access-Control-Max-Age`` header. + }, + "dockerfile_path": "str", # Optional. The + path to the Dockerfile relative to the root of the repo. If set, + it will be used to build this component. Otherwise, App Platform + will attempt to build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full + list, please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable + name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at + run-time * BUILD_TIME: Made available only at build-time + * RUN_AND_BUILD_TIME: Made available at both build and + run-time. Known values are: "UNSET", "RUN_TIME", + "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # Optional. + The value. If the type is ``SECRET``"" , the value will + be encrypted on first submission. On following + submissions, the encrypted value should be used. 
+ } + ], + "error_document": "404.html", # Optional. + Default value is "404.html". The name of the error document to + use when serving this static site. Default: 404.html. If no such + file exists within the built assets, App Platform will supply + one. + "git": { + "branch": "str", # Optional. The + name of the branch to use. + "repo_clone_url": "str" # Optional. + The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # Optional. + Whether to automatically deploy new images. Can only be + used for images hosted in DOCR and can only be used with + an image tag, not a specific digest. + }, + "digest": "str", # Optional. The + image digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The + registry name. Must be left empty for the ``DOCR`` registry + type. + "registry_credentials": "str", # + Optional. The credentials to be able to pull the image. The + value will be encrypted on first submission. On following + submissions, the encrypted value should be used. * + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for registries of + type ``GHCR``. + "registry_type": "str", # Optional. + * DOCKER_HUB: The DockerHub container registry type. * DOCR: + The DigitalOcean container registry type. 
* GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default + value is "latest". The repository tag. Defaults to ``latest`` + if not provided and no digest is provided. Cannot be + specified if digest is provided. + }, + "index_document": "index.html", # Optional. + Default value is "index.html". The name of the index document to + use when serving this static site. Default: index.html. + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # + Datadog API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # + Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": + "str", # Optional. Password for user defined in + User. Is required when ``endpoint`` is set. + Cannot be set if using a DigitalOcean DBaaS + OpenSearch cluster. + "user": "str" + # Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": + "str", # Optional. The name of a DigitalOcean DBaaS + OpenSearch cluster to use as a log forwarding + destination. Cannot be specified if ``endpoint`` is + also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": "logs" + # Optional. Default value is "logs". The index name + to use for the logs. If not set, the default index + name is "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. Must be + unique across all components within the same app. + "output_dir": "str", # Optional. 
An optional + path to where the built assets will be located, relative to the + build context. If not set, App Platform will automatically scan + for these directory names: ``_static``"" , ``dist``"" , + ``public``"" , ``build``. + "routes": [ + { + "path": "str", # Optional. + (Deprecated - Use Ingress Rules instead). An HTTP path + prefix. Paths must start with / and must be unique across + all components within an app. + "preserve_path_prefix": bool + # Optional. An optional flag to preserve the path that is + forwarded to the backend service. By default, the HTTP + request path will be trimmed from the left when forwarded + to the component. For example, a component with + ``path=/api`` will have requests to ``/api/list`` trimmed + to ``/list``. If this value is ``true``"" , the path will + remain ``/api/list``. + } + ], + "run_command": "str", # Optional. An + optional run command to override the component's default. + "source_dir": "str" # Optional. An optional + path to the working directory to use for the build. For + Dockerfile builds, this will be used as the build context. Must + be relative to the root of the repo. + } + ], + "vpc": { + "egress_ips": [ + { + "ip": "str" # Optional. The egress + ips associated with the VPC. + } + ], + "id": "str" # Optional. The ID of the VPC. + }, + "workers": [ + { + "autoscaling": { + "max_instance_count": 0, # Optional. + The maximum amount of instances for this component. Must be + more than min_instance_count. + "metrics": { + "cpu": { + "percent": 80 # + Optional. Default value is 80. The average target CPU + utilization for the component. + } + }, + "min_instance_count": 0 # Optional. + The minimum amount of instances for this component. Must be + less than max_instance_count. + }, + "bitbucket": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. 
The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "build_command": "str", # Optional. An + optional build command to run while building this component from + source. + "dockerfile_path": "str", # Optional. The + path to the Dockerfile relative to the root of the repo. If set, + it will be used to build this component. Otherwise, App Platform + will attempt to build it using buildpacks. + "environment_slug": "str", # Optional. An + environment slug describing the type of this app. For a full + list, please refer to `the product documentation + `_. + "envs": [ + { + "key": "str", # The variable + name. Required. + "scope": + "RUN_AND_BUILD_TIME", # Optional. Default value is + "RUN_AND_BUILD_TIME". * RUN_TIME: Made available only at + run-time * BUILD_TIME: Made available only at build-time + * RUN_AND_BUILD_TIME: Made available at both build and + run-time. Known values are: "UNSET", "RUN_TIME", + "BUILD_TIME", and "RUN_AND_BUILD_TIME". + "type": "GENERAL", # + Optional. Default value is "GENERAL". * GENERAL: A + plain-text environment variable * SECRET: A secret + encrypted environment variable. Known values are: + "GENERAL" and "SECRET". + "value": "str" # Optional. + The value. If the type is ``SECRET``"" , the value will + be encrypted on first submission. On following + submissions, the encrypted value should be used. + } + ], + "git": { + "branch": "str", # Optional. The + name of the branch to use. + "repo_clone_url": "str" # Optional. + The clone URL of the repo. Example: + ``https://github.com/digitalocean/sample-golang.git``. + }, + "github": { + "branch": "str", # Optional. The + name of the branch to use. + "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "gitlab": { + "branch": "str", # Optional. The + name of the branch to use. 
+ "deploy_on_push": bool, # Optional. + Whether to automatically deploy new commits made to the repo. + "repo": "str" # Optional. The name + of the repo in the format owner/repo. Example: + ``digitalocean/sample-golang``. + }, + "image": { + "deploy_on_push": { + "enabled": bool # Optional. + Whether to automatically deploy new images. Can only be + used for images hosted in DOCR and can only be used with + an image tag, not a specific digest. + }, + "digest": "str", # Optional. The + image digest. Cannot be specified if tag is provided. + "registry": "str", # Optional. The + registry name. Must be left empty for the ``DOCR`` registry + type. + "registry_credentials": "str", # + Optional. The credentials to be able to pull the image. The + value will be encrypted on first submission. On following + submissions, the encrypted value should be used. * + "$username:$access_token" for registries of type + ``DOCKER_HUB``. * "$username:$access_token" for registries of + type ``GHCR``. + "registry_type": "str", # Optional. + * DOCKER_HUB: The DockerHub container registry type. * DOCR: + The DigitalOcean container registry type. * GHCR: The Github + container registry type. Known values are: "DOCKER_HUB", + "DOCR", and "GHCR". + "repository": "str", # Optional. The + repository name. + "tag": "latest" # Optional. Default + value is "latest". The repository tag. Defaults to ``latest`` + if not provided and no digest is provided. Cannot be + specified if digest is provided. + }, + "instance_count": 1, # Optional. Default + value is 1. The amount of instances that this component should be + scaled to. Default: 1. Must not be set if autoscaling is used. + "instance_size_slug": {}, + "liveness_health_check": { + "failure_threshold": 0, # Optional. + The number of failed health checks before considered + unhealthy. + "http_path": "str", # Optional. The + route path used for the HTTP health check ping. 
If not set, + the HTTP health check will be disabled and a TCP health check + used instead. + "initial_delay_seconds": 0, # + Optional. The number of seconds to wait before beginning + health checks. + "period_seconds": 0, # Optional. The + number of seconds to wait between health checks. + "port": 0, # Optional. The port on + which the health check will be performed. + "success_threshold": 0, # Optional. + The number of successful health checks before considered + healthy. + "timeout_seconds": 0 # Optional. The + number of seconds after which the check times out. + }, + "log_destinations": [ + { + "name": "str", # Required. + "datadog": { + "api_key": "str", # + Datadog API key. Required. + "endpoint": "str" # + Optional. Datadog HTTP log intake endpoint. + }, + "logtail": { + "token": "str" # + Optional. Logtail token. + }, + "open_search": { + "basic_auth": { + "password": + "str", # Optional. Password for user defined in + User. Is required when ``endpoint`` is set. + Cannot be set if using a DigitalOcean DBaaS + OpenSearch cluster. + "user": "str" + # Optional. Username to authenticate with. Only + required when ``endpoint`` is set. Defaults to + ``doadmin`` when ``cluster_name`` is set. + }, + "cluster_name": + "str", # Optional. The name of a DigitalOcean DBaaS + OpenSearch cluster to use as a log forwarding + destination. Cannot be specified if ``endpoint`` is + also specified. + "endpoint": "str", # + Optional. OpenSearch API Endpoint. Only HTTPS is + supported. Format: + https://:code:``::code:``. Cannot be + specified if ``cluster_name`` is also specified. + "index_name": "logs" + # Optional. Default value is "logs". The index name + to use for the logs. If not set, the default index + name is "logs". + }, + "papertrail": { + "endpoint": "str" # + Papertrail syslog endpoint. Required. + } + } + ], + "name": "str", # Optional. The name. Must be + unique across all components within the same app. + "run_command": "str", # Optional. 
An + optional run command to override the component's default. + "source_dir": "str", # Optional. An optional + path to the working directory to use for the build. For + Dockerfile builds, this will be used as the build context. Must + be relative to the root of the repo. + "termination": { + "grace_period_seconds": 0 # + Optional. The number of seconds to wait between sending a + TERM signal to a container and issuing a KILL which causes + immediate shutdown. (Default 120). + } + } + ] + }, + "static_sites": [ + { + "name": "str", # Optional. The name of this static + site. + "source_commit_hash": "str" # Optional. The commit + hash of the repository that was used to build this static site. + } + ], + "tier_slug": "str", # Optional. The current pricing tier slug of the + deployment. + "updated_at": "2020-02-20 00:00:00", # Optional. When the deployment + was last updated. + "workers": [ + { + "name": "str", # Optional. The name of this worker. + "source_commit_hash": "str" # Optional. The commit + hash of the repository that was used to build this worker. + } + ] + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_apps_revert_rollback_request( + app_id=app_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + 
"int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def get_metrics_bandwidth_daily( + self, app_id: str, *, date: Optional[datetime.datetime] = None, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Retrieve App Daily Bandwidth Metrics. + + Retrieve daily bandwidth usage metrics for a single app. + + :param app_id: The app ID. Required. + :type app_id: str + :keyword date: Optional day to query. Only the date component of the timestamp will be + considered. Default: yesterday. Default value is None. + :paramtype date: ~datetime.datetime + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "app_bandwidth_usage": [ + { + "app_id": "str", # Optional. The ID of the app. + "bandwidth_bytes": "str" # Optional. The used bandwidth + amount in bytes. + } + ], + "date": "2020-02-20 00:00:00" # Optional. The date for the metrics data. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_apps_get_metrics_bandwidth_daily_request( + app_id=app_id, + date=date, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + 
response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def list_metrics_bandwidth_daily( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Retrieve Multiple Apps' Daily Bandwidth Metrics. + + Retrieve daily bandwidth usage metrics for multiple apps. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "app_ids": [ + "str" # A list of app IDs to query bandwidth metrics for. Required. + ], + "date": "2020-02-20 00:00:00" # Optional. Optional day to query. Only the + date component of the timestamp will be considered. Default: yesterday. + } + + # response body for status code(s): 200 + response == { + "app_bandwidth_usage": [ + { + "app_id": "str", # Optional. The ID of the app. + "bandwidth_bytes": "str" # Optional. The used bandwidth + amount in bytes. + } + ], + "date": "2020-02-20 00:00:00" # Optional. The date for the metrics data. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. 
+ "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def list_metrics_bandwidth_daily( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Retrieve Multiple Apps' Daily Bandwidth Metrics. + + Retrieve daily bandwidth usage metrics for multiple apps. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "app_bandwidth_usage": [ + { + "app_id": "str", # Optional. The ID of the app. + "bandwidth_bytes": "str" # Optional. The used bandwidth + amount in bytes. + } + ], + "date": "2020-02-20 00:00:00" # Optional. The date for the metrics data. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def list_metrics_bandwidth_daily( + self, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Retrieve Multiple Apps' Daily Bandwidth Metrics. 
+ + Retrieve daily bandwidth usage metrics for multiple apps. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "app_ids": [ + "str" # A list of app IDs to query bandwidth metrics for. Required. + ], + "date": "2020-02-20 00:00:00" # Optional. Optional day to query. Only the + date component of the timestamp will be considered. Default: yesterday. + } + + # response body for status code(s): 200 + response == { + "app_bandwidth_usage": [ + { + "app_id": "str", # Optional. The ID of the app. + "bandwidth_bytes": "str" # Optional. The used bandwidth + amount in bytes. + } + ], + "date": "2020-02-20 00:00:00" # Optional. The date for the metrics data. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_apps_list_metrics_bandwidth_daily_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: 
+ deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def get_health(self, app_id: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Retrieve App Health. + + Retrieve information like health status, cpu and memory utilization of app components. + + :param app_id: The app ID. Required. + :type app_id: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "app_health": { + "components": [ + { + "cpu_usage_percent": 0.0, # Optional. + "memory_usage_percent": 0.0, # Optional. + "name": "str", # Optional. + "replicas_desired": 0, # Optional. + "replicas_ready": 0, # Optional. + "state": "UNKNOWN" # Optional. Default value is + "UNKNOWN". Known values are: "UNKNOWN", "HEALTHY", and "UNHEALTHY". + } + ], + "functions_components": [ + { + "functions_component_health_metrics": [ + { + "metric_label": "str", # Optional. + "metric_value": 0.0, # Optional. + "time_window": "str" # Optional. + } + ], + "name": "str" # Optional. + } + ] + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. 
+ "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_apps_get_health_request( + app_id=app_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None 
+ + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + +class CdnOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~pydo.GeneratedClient`'s + :attr:`cdn` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) + + @distributed_trace + def list_endpoints( + self, *, per_page: int = 20, page: int = 1, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """List All CDN Endpoints. + + To list all of the CDN endpoints available on your account, send a GET request to + ``/v2/cdn/endpoints``. + + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "meta": { + "total": 0 # Optional. 
Number of objects returned by the request. + }, + "endpoints": [ + { + "origin": "str", # The fully qualified domain name (FQDN) + for the origin server which provides the content for the CDN. This is + currently restricted to a Space. Required. + "certificate_id": "str", # Optional. The ID of a + DigitalOcean managed TLS certificate used for SSL when a custom subdomain + is provided. + "created_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the CDN endpoint was created. + "custom_domain": "str", # Optional. The fully qualified + domain name (FQDN) of the custom subdomain used with the CDN endpoint. + "endpoint": "str", # Optional. The fully qualified domain + name (FQDN) from which the CDN-backed content is served. + "id": "str", # Optional. A unique ID that can be used to + identify and reference a CDN endpoint. + "ttl": 3600 # Optional. Default value is 3600. The amount of + time the content is cached by the CDN's edge servers in seconds. TTL must + be one of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) + when excluded. Known values are: 60, 600, 3600, 86400, and 604800. 
+ } + ], + "links": { + "pages": {} + } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_cdn_list_endpoints_request( + per_page=per_page, + page=page, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def create_endpoint( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: 
disable=line-too-long + """Create a New CDN Endpoint. + + To create a new CDN endpoint, send a POST request to ``/v2/cdn/endpoints``. The + origin attribute must be set to the fully qualified domain name (FQDN) of a + DigitalOcean Space. Optionally, the TTL may be configured by setting the ``ttl`` + attribute. + + A custom subdomain may be configured by specifying the ``custom_domain`` and + ``certificate_id`` attributes. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "origin": "str", # The fully qualified domain name (FQDN) for the origin + server which provides the content for the CDN. This is currently restricted to a + Space. Required. + "certificate_id": "str", # Optional. The ID of a DigitalOcean managed TLS + certificate used for SSL when a custom subdomain is provided. + "created_at": "2020-02-20 00:00:00", # Optional. A time value given in + ISO8601 combined date and time format that represents when the CDN endpoint was + created. + "custom_domain": "str", # Optional. The fully qualified domain name (FQDN) + of the custom subdomain used with the CDN endpoint. + "endpoint": "str", # Optional. The fully qualified domain name (FQDN) from + which the CDN-backed content is served. + "id": "str", # Optional. A unique ID that can be used to identify and + reference a CDN endpoint. + "ttl": 3600 # Optional. Default value is 3600. The amount of time the + content is cached by the CDN's edge servers in seconds. TTL must be one of 60, + 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when excluded. Known + values are: 60, 600, 3600, 86400, and 604800. 
+ } + + # response body for status code(s): 201 + response == { + "endpoint": { + "origin": "str", # The fully qualified domain name (FQDN) for the + origin server which provides the content for the CDN. This is currently + restricted to a Space. Required. + "certificate_id": "str", # Optional. The ID of a DigitalOcean + managed TLS certificate used for SSL when a custom subdomain is provided. + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the CDN + endpoint was created. + "custom_domain": "str", # Optional. The fully qualified domain name + (FQDN) of the custom subdomain used with the CDN endpoint. + "endpoint": "str", # Optional. The fully qualified domain name + (FQDN) from which the CDN-backed content is served. + "id": "str", # Optional. A unique ID that can be used to identify + and reference a CDN endpoint. + "ttl": 3600 # Optional. Default value is 3600. The amount of time + the content is cached by the CDN's edge servers in seconds. TTL must be one + of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when + excluded. Known values are: 60, 600, 3600, 86400, and 604800. + } + } + """ + + @overload + def create_endpoint( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a New CDN Endpoint. + + To create a new CDN endpoint, send a POST request to ``/v2/cdn/endpoints``. The + origin attribute must be set to the fully qualified domain name (FQDN) of a + DigitalOcean Space. Optionally, the TTL may be configured by setting the ``ttl`` + attribute. + + A custom subdomain may be configured by specifying the ``custom_domain`` and + ``certificate_id`` attributes. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "endpoint": { + "origin": "str", # The fully qualified domain name (FQDN) for the + origin server which provides the content for the CDN. This is currently + restricted to a Space. Required. + "certificate_id": "str", # Optional. The ID of a DigitalOcean + managed TLS certificate used for SSL when a custom subdomain is provided. + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the CDN + endpoint was created. + "custom_domain": "str", # Optional. The fully qualified domain name + (FQDN) of the custom subdomain used with the CDN endpoint. + "endpoint": "str", # Optional. The fully qualified domain name + (FQDN) from which the CDN-backed content is served. + "id": "str", # Optional. A unique ID that can be used to identify + and reference a CDN endpoint. + "ttl": 3600 # Optional. Default value is 3600. The amount of time + the content is cached by the CDN's edge servers in seconds. TTL must be one + of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when + excluded. Known values are: 60, 600, 3600, 86400, and 604800. + } + } + """ + + @distributed_trace + def create_endpoint(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Create a New CDN Endpoint. + + To create a new CDN endpoint, send a POST request to ``/v2/cdn/endpoints``. The + origin attribute must be set to the fully qualified domain name (FQDN) of a + DigitalOcean Space. Optionally, the TTL may be configured by setting the ``ttl`` + attribute. + + A custom subdomain may be configured by specifying the ``custom_domain`` and + ``certificate_id`` attributes. + + :param body: Is either a JSON type or a IO[bytes] type. Required. 
+ :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "origin": "str", # The fully qualified domain name (FQDN) for the origin + server which provides the content for the CDN. This is currently restricted to a + Space. Required. + "certificate_id": "str", # Optional. The ID of a DigitalOcean managed TLS + certificate used for SSL when a custom subdomain is provided. + "created_at": "2020-02-20 00:00:00", # Optional. A time value given in + ISO8601 combined date and time format that represents when the CDN endpoint was + created. + "custom_domain": "str", # Optional. The fully qualified domain name (FQDN) + of the custom subdomain used with the CDN endpoint. + "endpoint": "str", # Optional. The fully qualified domain name (FQDN) from + which the CDN-backed content is served. + "id": "str", # Optional. A unique ID that can be used to identify and + reference a CDN endpoint. + "ttl": 3600 # Optional. Default value is 3600. The amount of time the + content is cached by the CDN's edge servers in seconds. TTL must be one of 60, + 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when excluded. Known + values are: 60, 600, 3600, 86400, and 604800. + } + + # response body for status code(s): 201 + response == { + "endpoint": { + "origin": "str", # The fully qualified domain name (FQDN) for the + origin server which provides the content for the CDN. This is currently + restricted to a Space. Required. + "certificate_id": "str", # Optional. The ID of a DigitalOcean + managed TLS certificate used for SSL when a custom subdomain is provided. + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the CDN + endpoint was created. + "custom_domain": "str", # Optional. 
The fully qualified domain name + (FQDN) of the custom subdomain used with the CDN endpoint. + "endpoint": "str", # Optional. The fully qualified domain name + (FQDN) from which the CDN-backed content is served. + "id": "str", # Optional. A unique ID that can be used to identify + and reference a CDN endpoint. + "ttl": 3600 # Optional. Default value is 3600. The amount of time + the content is cached by the CDN's edge servers in seconds. TTL must be one + of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when + excluded. Known values are: 60, 600, 3600, 86400, and 604800. + } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_cdn_create_endpoint_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + if _stream: + response.read() # Load the body in memory and close the socket + 
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def get_endpoint(self, cdn_id: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Retrieve an Existing CDN Endpoint. + + To show information about an existing CDN endpoint, send a GET request to + ``/v2/cdn/endpoints/$ENDPOINT_ID``. + + :param cdn_id: A unique identifier for a CDN endpoint. Required. + :type cdn_id: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "endpoint": { + "origin": "str", # The fully qualified domain name (FQDN) for the + origin server which provides the content for the CDN. This is currently + restricted to a Space. Required. + "certificate_id": "str", # Optional. The ID of a DigitalOcean + managed TLS certificate used for SSL when a custom subdomain is provided. + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the CDN + endpoint was created. + "custom_domain": "str", # Optional. The fully qualified domain name + (FQDN) of the custom subdomain used with the CDN endpoint. + "endpoint": "str", # Optional. 
The fully qualified domain name + (FQDN) from which the CDN-backed content is served. + "id": "str", # Optional. A unique ID that can be used to identify + and reference a CDN endpoint. + "ttl": 3600 # Optional. Default value is 3600. The amount of time + the content is cached by the CDN's edge servers in seconds. TTL must be one + of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when + excluded. Known values are: 60, 600, 3600, 86400, and 604800. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_cdn_get_endpoint_request( + cdn_id=cdn_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + 
"int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def update_endpoints( + self, + cdn_id: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Update a CDN Endpoint. + + To update the TTL, certificate ID, or the FQDN of the custom subdomain for + an existing CDN endpoint, send a PUT request to + ``/v2/cdn/endpoints/$ENDPOINT_ID``. + + :param cdn_id: A unique identifier for a CDN endpoint. Required. + :type cdn_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "certificate_id": "str", # Optional. The ID of a DigitalOcean managed TLS + certificate used for SSL when a custom subdomain is provided. + "custom_domain": "str", # Optional. The fully qualified domain name (FQDN) + of the custom subdomain used with the CDN endpoint. + "ttl": 3600 # Optional. Default value is 3600. The amount of time the + content is cached by the CDN's edge servers in seconds. TTL must be one of 60, + 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when excluded. Known + values are: 60, 600, 3600, 86400, and 604800. + } + + # response body for status code(s): 200 + response == { + "endpoint": { + "origin": "str", # The fully qualified domain name (FQDN) for the + origin server which provides the content for the CDN. This is currently + restricted to a Space. Required. 
+ "certificate_id": "str", # Optional. The ID of a DigitalOcean + managed TLS certificate used for SSL when a custom subdomain is provided. + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the CDN + endpoint was created. + "custom_domain": "str", # Optional. The fully qualified domain name + (FQDN) of the custom subdomain used with the CDN endpoint. + "endpoint": "str", # Optional. The fully qualified domain name + (FQDN) from which the CDN-backed content is served. + "id": "str", # Optional. A unique ID that can be used to identify + and reference a CDN endpoint. + "ttl": 3600 # Optional. Default value is 3600. The amount of time + the content is cached by the CDN's edge servers in seconds. TTL must be one + of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when + excluded. Known values are: 60, 600, 3600, 86400, and 604800. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def update_endpoints( + self, + cdn_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Update a CDN Endpoint. + + To update the TTL, certificate ID, or the FQDN of the custom subdomain for + an existing CDN endpoint, send a PUT request to + ``/v2/cdn/endpoints/$ENDPOINT_ID``. + + :param cdn_id: A unique identifier for a CDN endpoint. Required. 
+ :type cdn_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "endpoint": { + "origin": "str", # The fully qualified domain name (FQDN) for the + origin server which provides the content for the CDN. This is currently + restricted to a Space. Required. + "certificate_id": "str", # Optional. The ID of a DigitalOcean + managed TLS certificate used for SSL when a custom subdomain is provided. + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the CDN + endpoint was created. + "custom_domain": "str", # Optional. The fully qualified domain name + (FQDN) of the custom subdomain used with the CDN endpoint. + "endpoint": "str", # Optional. The fully qualified domain name + (FQDN) from which the CDN-backed content is served. + "id": "str", # Optional. A unique ID that can be used to identify + and reference a CDN endpoint. + "ttl": 3600 # Optional. Default value is 3600. The amount of time + the content is cached by the CDN's edge servers in seconds. TTL must be one + of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when + excluded. Known values are: 60, 600, 3600, 86400, and 604800. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def update_endpoints( + self, cdn_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update a CDN Endpoint. + + To update the TTL, certificate ID, or the FQDN of the custom subdomain for + an existing CDN endpoint, send a PUT request to + ``/v2/cdn/endpoints/$ENDPOINT_ID``. + + :param cdn_id: A unique identifier for a CDN endpoint. Required. + :type cdn_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "certificate_id": "str", # Optional. The ID of a DigitalOcean managed TLS + certificate used for SSL when a custom subdomain is provided. + "custom_domain": "str", # Optional. The fully qualified domain name (FQDN) + of the custom subdomain used with the CDN endpoint. + "ttl": 3600 # Optional. Default value is 3600. The amount of time the + content is cached by the CDN's edge servers in seconds. TTL must be one of 60, + 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when excluded. Known + values are: 60, 600, 3600, 86400, and 604800. + } + + # response body for status code(s): 200 + response == { + "endpoint": { + "origin": "str", # The fully qualified domain name (FQDN) for the + origin server which provides the content for the CDN. This is currently + restricted to a Space. Required. + "certificate_id": "str", # Optional. The ID of a DigitalOcean + managed TLS certificate used for SSL when a custom subdomain is provided. + "created_at": "2020-02-20 00:00:00", # Optional. 
A time value given + in ISO8601 combined date and time format that represents when the CDN + endpoint was created. + "custom_domain": "str", # Optional. The fully qualified domain name + (FQDN) of the custom subdomain used with the CDN endpoint. + "endpoint": "str", # Optional. The fully qualified domain name + (FQDN) from which the CDN-backed content is served. + "id": "str", # Optional. A unique ID that can be used to identify + and reference a CDN endpoint. + "ttl": 3600 # Optional. Default value is 3600. The amount of time + the content is cached by the CDN's edge servers in seconds. TTL must be one + of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when + excluded. Known values are: 60, 600, 3600, 86400, and 604800. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_cdn_update_endpoints_request( + cdn_id=cdn_id, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + 
else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def delete_endpoint(self, cdn_id: str, **kwargs: Any) -> Optional[JSON]: + # pylint: disable=line-too-long + """Delete a CDN Endpoint. + + To delete a specific CDN endpoint, send a DELETE request to + ``/v2/cdn/endpoints/$ENDPOINT_ID``. + + A status of 204 will be given. This indicates that the request was processed + successfully, but that no response body is needed. + + :param cdn_id: A unique identifier for a CDN endpoint. Required. + :type cdn_id: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_cdn_delete_endpoint_request( + cdn_id=cdn_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if 
response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def purge_cache( + self, + cdn_id: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Purge the Cache for an Existing CDN Endpoint. + + To purge cached content from a CDN endpoint, send a DELETE request to + ``/v2/cdn/endpoints/$ENDPOINT_ID/cache``. The body of the request should include + a ``files`` attribute containing a list of cached file paths to be purged. A + path may be for a single file or may contain a wildcard (\\ ``*``\\ ) to recursively + purge all files under a directory. When only a wildcard is provided, all cached + files will be purged. There is a rate limit of 50 files per 20 seconds that can + be purged. CDN endpoints have a rate limit of 5 requests per 10 seconds. + Purging files using a wildcard path counts as a single request against the API's + rate limit. Two identical purge requests cannot be sent at the same time. + + :param cdn_id: A unique identifier for a CDN endpoint. Required. + :type cdn_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "files": [ + "str" # An array of strings containing the path to the content to be + purged from the CDN cache. Required. + ] + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. 
For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def purge_cache( + self, + cdn_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Purge the Cache for an Existing CDN Endpoint. + + To purge cached content from a CDN endpoint, send a DELETE request to + ``/v2/cdn/endpoints/$ENDPOINT_ID/cache``. The body of the request should include + a ``files`` attribute containing a list of cached file paths to be purged. A + path may be for a single file or may contain a wildcard (\\ ``*``\\ ) to recursively + purge all files under a directory. When only a wildcard is provided, all cached + files will be purged. There is a rate limit of 50 files per 20 seconds that can + be purged. CDN endpoints have a rate limit of 5 requests per 10 seconds. + Purging files using a wildcard path counts as a single request against the API's + rate limit. Two identical purge requests cannot be sent at the same time. + + :param cdn_id: A unique identifier for a CDN endpoint. Required. + :type cdn_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def purge_cache( + self, cdn_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Purge the Cache for an Existing CDN Endpoint. + + To purge cached content from a CDN endpoint, send a DELETE request to + ``/v2/cdn/endpoints/$ENDPOINT_ID/cache``. The body of the request should include + a ``files`` attribute containing a list of cached file paths to be purged. A + path may be for a single file or may contain a wildcard (\\ ``*``\\ ) to recursively + purge all files under a directory. When only a wildcard is provided, all cached + files will be purged. There is a rate limit of 50 files per 20 seconds that can + be purged. CDN endpoints have a rate limit of 5 requests per 10 seconds. + Purging files using a wildcard path counts as a single request against the API's + rate limit. Two identical purge requests cannot be sent at the same time. + + :param cdn_id: A unique identifier for a CDN endpoint. Required. + :type cdn_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. 
+ body = { + "files": [ + "str" # An array of strings containing the path to the content to be + purged from the CDN cache. Required. + ] + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_cdn_purge_cache_request( + cdn_id=cdn_id, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if 
response.status_code not in [204, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + +class CertificatesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~pydo.GeneratedClient`'s + :attr:`certificates` attribute. 
+ """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) + + @distributed_trace + def list( + self, *, per_page: int = 20, page: int = 1, name: str = "", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """List All Certificates. + + To list all of the certificates available on your account, send a GET request to + ``/v2/certificates``. + + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :keyword name: Name of expected certificate. Default value is "". + :paramtype name: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "certificates": [ + { + "created_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the certificate was created. + "dns_names": [ + "str" # Optional. An array of fully qualified domain + names (FQDNs) for which the certificate was issued. + ], + "id": "str", # Optional. A unique ID that can be used to + identify and reference a certificate. + "name": "str", # Optional. A unique human-readable name + referring to a certificate. + "not_after": "2020-02-20 00:00:00", # Optional. A time value + given in ISO8601 combined date and time format that represents the + certificate's expiration date. + "sha1_fingerprint": "str", # Optional. 
A unique identifier + generated from the SHA-1 fingerprint of the certificate. + "state": "str", # Optional. A string representing the + current state of the certificate. It may be ``pending``, + ``verified``, or ``error``. Known values are: "pending", "verified", + and "error". + "type": "str" # Optional. A string representing the type of + the certificate. The value will be ``custom`` for a user-uploaded + certificate or ``lets_encrypt`` for one automatically generated with + Let's Encrypt. Known values are: "custom" and "lets_encrypt". + } + ], + "links": { + "pages": {} + } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_certificates_list_request( + per_page=per_page, + page=page, + name=name, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + 
"int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def create( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a New Certificate. + + To upload new SSL certificate which you have previously generated, send a POST + request to ``/v2/certificates``. + + When uploading a user-generated certificate, the ``private_key``\\ , + ``leaf_certificate``\\ , and optionally the ``certificate_chain`` attributes should + be provided. The type must be set to ``custom``. + + When using Let's Encrypt to create a certificate, the ``dns_names`` attribute + must be provided, and the type must be set to ``lets_encrypt``. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = {} + + # response body for status code(s): 201 + response == { + "certificate": { + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the certificate + was created. + "dns_names": [ + "str" # Optional. An array of fully qualified domain names + (FQDNs) for which the certificate was issued. + ], + "id": "str", # Optional. A unique ID that can be used to identify + and reference a certificate. + "name": "str", # Optional. 
A unique human-readable name referring to + a certificate. + "not_after": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents the certificate's + expiration date. + "sha1_fingerprint": "str", # Optional. A unique identifier generated + from the SHA-1 fingerprint of the certificate. + "state": "str", # Optional. A string representing the current state + of the certificate. It may be ``pending``"" , ``verified``"" , or ``error``. + Known values are: "pending", "verified", and "error". + "type": "str" # Optional. A string representing the type of the + certificate. The value will be ``custom`` for a user-uploaded certificate or + ``lets_encrypt`` for one automatically generated with Let's Encrypt. Known + values are: "custom" and "lets_encrypt". + } + } + """ + + @overload + def create( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a New Certificate. + + To upload new SSL certificate which you have previously generated, send a POST + request to ``/v2/certificates``. + + When uploading a user-generated certificate, the ``private_key``\\ , + ``leaf_certificate``\\ , and optionally the ``certificate_chain`` attributes should + be provided. The type must be set to ``custom``. + + When using Let's Encrypt to create a certificate, the ``dns_names`` attribute + must be provided, and the type must be set to ``lets_encrypt``. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "certificate": { + "created_at": "2020-02-20 00:00:00", # Optional. 
A time value given + in ISO8601 combined date and time format that represents when the certificate + was created. + "dns_names": [ + "str" # Optional. An array of fully qualified domain names + (FQDNs) for which the certificate was issued. + ], + "id": "str", # Optional. A unique ID that can be used to identify + and reference a certificate. + "name": "str", # Optional. A unique human-readable name referring to + a certificate. + "not_after": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents the certificate's + expiration date. + "sha1_fingerprint": "str", # Optional. A unique identifier generated + from the SHA-1 fingerprint of the certificate. + "state": "str", # Optional. A string representing the current state + of the certificate. It may be ``pending``"" , ``verified``"" , or ``error``. + Known values are: "pending", "verified", and "error". + "type": "str" # Optional. A string representing the type of the + certificate. The value will be ``custom`` for a user-uploaded certificate or + ``lets_encrypt`` for one automatically generated with Let's Encrypt. Known + values are: "custom" and "lets_encrypt". + } + } + """ + + @distributed_trace + def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Create a New Certificate. + + To upload new SSL certificate which you have previously generated, send a POST + request to ``/v2/certificates``. + + When uploading a user-generated certificate, the ``private_key``\\ , + ``leaf_certificate``\\ , and optionally the ``certificate_chain`` attributes should + be provided. The type must be set to ``custom``. + + When using Let's Encrypt to create a certificate, the ``dns_names`` attribute + must be provided, and the type must be set to ``lets_encrypt``. + + :param body: Is either a JSON type or a IO[bytes] type. Required. 
+ :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = {} + + # response body for status code(s): 201 + response == { + "certificate": { + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the certificate + was created. + "dns_names": [ + "str" # Optional. An array of fully qualified domain names + (FQDNs) for which the certificate was issued. + ], + "id": "str", # Optional. A unique ID that can be used to identify + and reference a certificate. + "name": "str", # Optional. A unique human-readable name referring to + a certificate. + "not_after": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents the certificate's + expiration date. + "sha1_fingerprint": "str", # Optional. A unique identifier generated + from the SHA-1 fingerprint of the certificate. + "state": "str", # Optional. A string representing the current state + of the certificate. It may be ``pending``"" , ``verified``"" , or ``error``. + Known values are: "pending", "verified", and "error". + "type": "str" # Optional. A string representing the type of the + certificate. The value will be ``custom`` for a user-uploaded certificate or + ``lets_encrypt`` for one automatically generated with Let's Encrypt. Known + values are: "custom" and "lets_encrypt". 
+ } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_certificates_create_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return 
cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def get(self, certificate_id: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Retrieve an Existing Certificate. + + To show information about an existing certificate, send a GET request to + ``/v2/certificates/$CERTIFICATE_ID``. + + :param certificate_id: A unique identifier for a certificate. Required. + :type certificate_id: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "certificate": { + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the certificate + was created. + "dns_names": [ + "str" # Optional. An array of fully qualified domain names + (FQDNs) for which the certificate was issued. + ], + "id": "str", # Optional. A unique ID that can be used to identify + and reference a certificate. + "name": "str", # Optional. A unique human-readable name referring to + a certificate. + "not_after": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents the certificate's + expiration date. + "sha1_fingerprint": "str", # Optional. A unique identifier generated + from the SHA-1 fingerprint of the certificate. + "state": "str", # Optional. A string representing the current state + of the certificate. It may be ``pending``"" , ``verified``"" , or ``error``. + Known values are: "pending", "verified", and "error". + "type": "str" # Optional. A string representing the type of the + certificate. The value will be ``custom`` for a user-uploaded certificate or + ``lets_encrypt`` for one automatically generated with Let's Encrypt. Known + values are: "custom" and "lets_encrypt". 
} } # response body for status code(s): 404 @@ -114136,8 +122481,8 @@ def revert_rollback(self, app_id: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_apps_revert_rollback_request( - app_id=app_id, + _request = build_certificates_get_request( + certificate_id=certificate_id, headers=_headers, params=_params, ) @@ -114197,19 +122542,136 @@ def revert_rollback(self, app_id: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_metrics_bandwidth_daily( - self, app_id: str, *, date: Optional[datetime.datetime] = None, **kwargs: Any - ) -> JSON: + def delete(self, certificate_id: str, **kwargs: Any) -> Optional[JSON]: # pylint: disable=line-too-long - """Retrieve App Daily Bandwidth Metrics. + """Delete a Certificate. - Retrieve daily bandwidth usage metrics for a single app. + To delete a specific certificate, send a DELETE request to + ``/v2/certificates/$CERTIFICATE_ID``. + + :param certificate_id: A unique identifier for a certificate. Required. + :type certificate_id: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_certificates_delete_request( + certificate_id=certificate_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", 
response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + +class BalanceOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~pydo.GeneratedClient`'s + :attr:`balance` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) + + @distributed_trace + def get(self, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Get Customer Balance. + + To retrieve the balances on a customer's account, send a GET request to + ``/v2/customers/my/balance``. - :param app_id: The app ID. Required. - :type app_id: str - :keyword date: Optional day to query. Only the date component of the timestamp will be - considered. Default: yesterday. Default value is None. - :paramtype date: ~datetime.datetime :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -114219,14 +122681,15 @@ def get_metrics_bandwidth_daily( # response body for status code(s): 200 response == { - "app_bandwidth_usage": [ - { - "app_id": "str", # Optional. The ID of the app. - "bandwidth_bytes": "str" # Optional. The used bandwidth - amount in bytes. - } - ], - "date": "2020-02-20 00:00:00" # Optional. The date for the metrics data. + "account_balance": "str", # Optional. Current balance of the customer's most + recent billing activity. Does not reflect ``month_to_date_usage``. 
+ "generated_at": "2020-02-20 00:00:00", # Optional. The time at which + balances were most recently generated. + "month_to_date_balance": "str", # Optional. Balance as of the + ``generated_at`` time. This value includes the ``account_balance`` and + ``month_to_date_usage``. + "month_to_date_usage": "str" # Optional. Amount used in the current billing + period as of the ``generated_at`` time. } # response body for status code(s): 404 response == { @@ -114258,9 +122721,7 @@ def get_metrics_bandwidth_daily( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_apps_get_metrics_bandwidth_daily_request( - app_id=app_id, - date=date, + _request = build_balance_get_request( headers=_headers, params=_params, ) @@ -114319,20 +122780,34 @@ def get_metrics_bandwidth_daily( return cast(JSON, deserialized) # type: ignore - @overload - def list_metrics_bandwidth_daily( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + +class BillingHistoryOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~pydo.GeneratedClient`'s + :attr:`billing_history` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) + + @distributed_trace + def list(self, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Retrieve Multiple Apps' Daily Bandwidth Metrics. + """List Billing History. - Retrieve daily bandwidth usage metrics for multiple apps. + To retrieve a list of all billing history entries, send a GET request to + ``/v2/customers/my/billing_history``. - :param body: Required. 
- :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -114340,25 +122815,32 @@ def list_metrics_bandwidth_daily( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "app_ids": [ - "str" # A list of app IDs to query bandwidth metrics for. Required. - ], - "date": "2020-02-20 00:00:00" # Optional. Optional day to query. Only the - date component of the timestamp will be considered. Default: yesterday. - } - # response body for status code(s): 200 response == { - "app_bandwidth_usage": [ + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "billing_history": [ { - "app_id": "str", # Optional. The ID of the app. - "bandwidth_bytes": "str" # Optional. The used bandwidth - amount in bytes. + "amount": "str", # Optional. Amount of the billing history + entry. + "date": "2020-02-20 00:00:00", # Optional. Time the billing + history entry occurred. + "description": "str", # Optional. Description of the billing + history entry. + "invoice_id": "str", # Optional. ID of the invoice + associated with the billing history entry, if applicable. + "invoice_uuid": "str", # Optional. UUID of the invoice + associated with the billing history entry, if applicable. + "type": "str" # Optional. Type of billing history entry. + Known values are: "ACHFailure", "Adjustment", "AttemptFailed", + "Chargeback", "Credit", "CreditExpiration", "Invoice", "Payment", + "Refund", and "Reversal". } ], - "date": "2020-02-20 00:00:00" # Optional. The date for the metrics data. + "links": { + "pages": {} + } } # response body for status code(s): 404 response == { @@ -114372,21 +122854,235 @@ def list_metrics_bandwidth_daily( tickets to help identify the issue. 
} """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @overload - def list_metrics_bandwidth_daily( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_billing_history_list_request( + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", 
response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + +class InvoicesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~pydo.GeneratedClient`'s + :attr:`invoices` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) + + @distributed_trace + def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """List All Invoices. + + To retrieve a list of all invoices, send a GET request to ``/v2/customers/my/invoices``. + + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "invoice_preview": { + "amount": "str", # Optional. Total amount of the invoice, in USD. + This will reflect month-to-date usage in the invoice preview. + "invoice_id": "str", # Optional. ID of the invoice. 
Listed on the + face of the invoice PDF as the "Invoice number". + "invoice_period": "str", # Optional. Billing period of usage for + which the invoice is issued, in ``YYYY-MM`` format. + "invoice_uuid": "str", # Optional. The UUID of the invoice. The + canonical reference for the invoice. + "updated_at": "str" # Optional. Time the invoice was last updated. + This is only included with the invoice preview. + }, + "invoices": [ + { + "amount": "str", # Optional. Total amount of the invoice, in + USD. This will reflect month-to-date usage in the invoice preview. + "invoice_id": "str", # Optional. ID of the invoice. Listed + on the face of the invoice PDF as the "Invoice number". + "invoice_period": "str", # Optional. Billing period of usage + for which the invoice is issued, in ``YYYY-MM`` format. + "invoice_uuid": "str", # Optional. The UUID of the invoice. + The canonical reference for the invoice. + "updated_at": "str" # Optional. Time the invoice was last + updated. This is only included with the invoice preview. 
+ } + ], + "links": { + "pages": {} + } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_invoices_list_request( + per_page=per_page, + page=page, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def get_by_uuid( + self, invoice_uuid: str, *, per_page: int = 20, page: int = 1, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long 
- """Retrieve Multiple Apps' Daily Bandwidth Metrics. + """Retrieve an Invoice by UUID. - Retrieve daily bandwidth usage metrics for multiple apps. + To retrieve the invoice items for an invoice, send a GET request to + ``/v2/customers/my/invoices/$INVOICE_UUID``. - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str + :param invoice_uuid: UUID of the invoice. Required. + :type invoice_uuid: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -114396,14 +123092,39 @@ def list_metrics_bandwidth_daily( # response body for status code(s): 200 response == { - "app_bandwidth_usage": [ + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "invoice_items": [ { - "app_id": "str", # Optional. The ID of the app. - "bandwidth_bytes": "str" # Optional. The used bandwidth - amount in bytes. + "amount": "str", # Optional. Billed amount of this invoice + item. Billed in USD. + "description": "str", # Optional. Description of the invoice + item. + "duration": "str", # Optional. Duration of time this invoice + item was used and subsequently billed. + "duration_unit": "str", # Optional. Unit of time for + duration. + "end_time": "str", # Optional. Time the invoice item stopped + being billed for usage. + "group_description": "str", # Optional. Description of the + invoice item when it is a grouped set of usage, such as DOKS or + databases. + "product": "str", # Optional. Name of the product being + billed in the invoice item. + "project_name": "str", # Optional. Name of the DigitalOcean + Project this resource belongs to. 
+ "resource_id": "str", # Optional. ID of the resource billing + in the invoice item if available. + "resource_uuid": "str", # Optional. UUID of the resource + billing in the invoice item if available. + "start_time": "str" # Optional. Time the invoice item began + to be billed for usage. } ], - "date": "2020-02-20 00:00:00" # Optional. The date for the metrics data. + "links": { + "pages": {} + } } # response body for status code(s): 404 response == { @@ -114417,45 +123138,215 @@ def list_metrics_bandwidth_daily( tickets to help identify the issue. } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_invoices_get_by_uuid_request( + invoice_uuid=invoice_uuid, + per_page=per_page, + page=page, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = 
self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore @distributed_trace - def list_metrics_bandwidth_daily( - self, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> JSON: + def get_csv_by_uuid(self, invoice_uuid: str, **kwargs: Any) -> Union[str, JSON]: # pylint: disable=line-too-long - """Retrieve Multiple Apps' Daily Bandwidth Metrics. + """Retrieve an Invoice CSV by UUID. - Retrieve daily bandwidth usage metrics for multiple apps. + To retrieve a CSV for an invoice, send a GET request to + ``/v2/customers/my/invoices/$INVOICE_UUID/csv``. - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON + :param invoice_uuid: UUID of the invoice. Required. + :type invoice_uuid: str + :return: str or JSON object + :rtype: str or JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "app_ids": [ - "str" # A list of app IDs to query bandwidth metrics for. Required. - ], - "date": "2020-02-20 00:00:00" # Optional. Optional day to query. 
Only the - date component of the timestamp will be considered. Default: yesterday. + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Union[str, JSON]] = kwargs.pop("cls", None) + + _request = build_invoices_get_csv_by_uuid_request( + invoice_uuid=invoice_uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["content-disposition"] = self._deserialize( + "str", 
response.headers.get("content-disposition") + ) + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(Union[str, JSON], deserialized), response_headers) # type: ignore + + return cast(Union[str, JSON], deserialized) # type: ignore + + @distributed_trace + def get_pdf_by_uuid( + self, invoice_uuid: str, **kwargs: Any + ) -> Union[Iterator[bytes], JSON]: + # pylint: disable=line-too-long + """Retrieve an Invoice PDF by UUID. + + To retrieve a PDF for an invoice, send a GET request to + ``/v2/customers/my/invoices/$INVOICE_UUID/pdf``. + + :param invoice_uuid: UUID of the invoice. Required. + :type invoice_uuid: str + :return: Iterator[bytes] or JSON object + :rtype: Iterator[bytes] or JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python - # response body for status code(s): 200 - response == { - "app_bandwidth_usage": [ - { - "app_id": "str", # Optional. The ID of the app. - "bandwidth_bytes": "str" # Optional. The used bandwidth - amount in bytes. - } - ], - "date": "2020-02-20 00:00:00" # Optional. The date for the metrics data. 
- } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -114481,32 +123372,19 @@ def list_metrics_bandwidth_daily( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[JSON] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body + cls: ClsType[Union[Iterator[bytes], JSON]] = kwargs.pop("cls", None) - _request = build_apps_list_metrics_bandwidth_daily_request( - content_type=content_type, - json=_json, - content=_content, + _request = build_invoices_get_pdf_by_uuid_request( + invoice_uuid=invoice_uuid, headers=_headers, params=_params, ) _request.url = self._client.format_url(_request.url) - _stream = False + _stream = True pipeline_response: PipelineResponse = ( self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -114523,6 +123401,9 @@ def list_metrics_bandwidth_daily( response_headers = {} if response.status_code == 200: + response_headers["content-disposition"] = self._deserialize( + "str", response.headers.get("content-disposition") + ) response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -114533,10 +123414,7 @@ def list_metrics_bandwidth_daily( "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None + deserialized = response.iter_bytes() if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( @@ -114549,25 +123427,23 @@ def list_metrics_bandwidth_daily( "int", 
response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None + deserialized = response.iter_bytes() if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, cast(Union[Iterator[bytes], JSON], deserialized), response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return cast(Union[Iterator[bytes], JSON], deserialized) # type: ignore @distributed_trace - def get_health(self, app_id: str, **kwargs: Any) -> JSON: + def get_summary_by_uuid(self, invoice_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Retrieve App Health. + """Retrieve an Invoice Summary by UUID. - Retrieve information like health status, cpu and memory utilization of app components. + To retrieve a summary for an invoice, send a GET request to + ``/v2/customers/my/invoices/$INVOICE_UUID/summary``. - :param app_id: The app ID. Required. - :type app_id: str + :param invoice_uuid: UUID of the invoice. Required. + :type invoice_uuid: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -114577,31 +123453,54 @@ def get_health(self, app_id: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "app_health": { - "components": [ + "amount": "str", # Optional. Total amount of the invoice, in USD. This will + reflect month-to-date usage in the invoice preview. + "billing_period": "str", # Optional. Billing period of usage for which the + invoice is issued, in ``YYYY-MM`` format. + "credits_and_adjustments": { + "amount": "str", # Optional. Total amount charged in USD. + "name": "str" # Optional. Name of the charge. + }, + "invoice_id": "str", # Optional. ID of the invoice. + "invoice_uuid": "str", # Optional. UUID of the invoice. + "overages": { + "amount": "str", # Optional. Total amount charged in USD. + "name": "str" # Optional. Name of the charge. 
+ }, + "product_charges": { + "amount": "str", # Optional. Total amount charged. + "items": [ { - "cpu_usage_percent": 0.0, # Optional. - "memory_usage_percent": 0.0, # Optional. - "name": "str", # Optional. - "replicas_desired": 0, # Optional. - "replicas_ready": 0, # Optional. - "state": "UNKNOWN" # Optional. Default value is - "UNKNOWN". Known values are: "UNKNOWN", "HEALTHY", and "UNHEALTHY". + "amount": "str", # Optional. Amount of the charge. + "count": "str", # Optional. Number of times the + charge was applied. + "name": "str" # Optional. Description of the charge. } ], - "functions_components": [ - { - "functions_component_health_metrics": [ - { - "metric_label": "str", # Optional. - "metric_value": 0.0, # Optional. - "time_window": "str" # Optional. - } - ], - "name": "str" # Optional. - } - ] - } + "name": "str" # Optional. Description of usage charges. + }, + "taxes": { + "amount": "str", # Optional. Total amount charged in USD. + "name": "str" # Optional. Name of the charge. + }, + "user_billing_address": { + "address_line1": "str", # Optional. Street address line 1. + "address_line2": "str", # Optional. Street address line 2. + "city": "str", # Optional. City. + "country_iso2_code": "str", # Optional. Country (ISO2) code. + "created_at": "str", # Optional. Timestamp billing address was + created. + "postal_code": "str", # Optional. Postal code. + "region": "str", # Optional. Region. + "updated_at": "str" # Optional. Timestamp billing address was + updated. + }, + "user_company": "str", # Optional. Company of the DigitalOcean customer + being invoiced, if set. + "user_email": "str", # Optional. Email of the DigitalOcean customer being + invoiced. + "user_name": "str" # Optional. Name of the DigitalOcean customer being + invoiced. 
} # response body for status code(s): 404 response == { @@ -114633,8 +123532,8 @@ def get_health(self, app_id: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_apps_get_health_request( - app_id=app_id, + _request = build_invoices_get_summary_by_uuid_request( + invoice_uuid=invoice_uuid, headers=_headers, params=_params, ) @@ -114694,14 +123593,14 @@ def get_health(self, app_id: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore -class CdnOperations: +class BillingInsightsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~pydo.GeneratedClient`'s - :attr:`cdn` attribute. + :attr:`billing_insights` attribute. """ def __init__(self, *args, **kwargs): @@ -114714,15 +123613,33 @@ def __init__(self, *args, **kwargs): ) @distributed_trace - def list_endpoints( - self, *, per_page: int = 20, page: int = 1, **kwargs: Any + def list( + self, + account_urn: str, + start_date: datetime.date, + end_date: datetime.date, + *, + per_page: int = 20, + page: int = 1, + **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """List All CDN Endpoints. + """List Billing Insights. - To list all of the CDN endpoints available on your account, send a GET request to - ``/v2/cdn/endpoints``. + This endpoint returns day-over-day changes in billing resource usage based on nightly invoice + items, including total amount, region, SKU, and description for a specified date range. It is + important to note that the daily resource usage may not reflect month-end billing totals when + totaled for a given month as nightly invoice item estimates do not necessarily encompass all + invoicing factors for the entire month. + :param account_urn: URN of the customer account, can be a team (do:team:uuid) or an + organization (do:teamgroup:uuid). Required. 
+ :type account_urn: str + :param start_date: Start date for billing insights in YYYY-MM-DD format. Required. + :type start_date: ~datetime.date + :param end_date: End date for billing insights in YYYY-MM-DD format. Must be within 31 days of + start_date. Required. + :type end_date: ~datetime.date :keyword per_page: Number of items returned per page. Default value is 20. :paramtype per_page: int :keyword page: Which 'page' of paginated results to return. Default value is 1. @@ -114736,406 +123653,29 @@ def list_endpoints( # response body for status code(s): 200 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "endpoints": [ + "current_page": 0, # Current page number. Required. + "data_points": [ { - "origin": "str", # The fully qualified domain name (FQDN) - for the origin server which provides the content for the CDN. This is - currently restricted to a Space. Required. - "certificate_id": "str", # Optional. The ID of a - DigitalOcean managed TLS certificate used for SSL when a custom subdomain - is provided. - "created_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the CDN endpoint was created. - "custom_domain": "str", # Optional. The fully qualified - domain name (FQDN) of the custom subdomain used with the CDN endpoint. - "endpoint": "str", # Optional. The fully qualified domain - name (FQDN) from which the CDN-backed content is served. - "id": "str", # Optional. A unique ID that can be used to - identify and reference a CDN endpoint. - "ttl": 3600 # Optional. Default value is 3600. The amount of - time the content is cached by the CDN's edge servers in seconds. TTL must - be one of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) - when excluded. Known values are: 60, 600, 3600, 86400, and 604800. + "description": "str", # Optional. Description of the billed + resource or service as shown on an invoice item. 
+ "group_description": "str", # Optional. Optional invoice + item group name of the billed resource or service, blank when not part an + invoice item group. + "region": "str", # Optional. Region where the usage + occurred. + "sku": "str", # Optional. Unique SKU identifier for the + billed resource. + "start_date": "2020-02-20", # Optional. Start date of the + billing data point in YYYY-MM-DD format. + "total_amount": "str", # Optional. Total amount for this + data point in USD. + "usage_team_urn": "str" # Optional. URN of the team that + incurred the usage. } ], - "links": { - "pages": {} - } - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_cdn_list_endpoints_request( - per_page=per_page, - page=page, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", 
response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @overload - def create_endpoint( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Create a New CDN Endpoint. - - To create a new CDN endpoint, send a POST request to ``/v2/cdn/endpoints``. The - origin attribute must be set to the fully qualified domain name (FQDN) of a - DigitalOcean Space. Optionally, the TTL may be configured by setting the ``ttl`` - attribute. - - A custom subdomain may be configured by specifying the ``custom_domain`` and - ``certificate_id`` attributes. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "origin": "str", # The fully qualified domain name (FQDN) for the origin - server which provides the content for the CDN. This is currently restricted to a - Space. Required. - "certificate_id": "str", # Optional. The ID of a DigitalOcean managed TLS - certificate used for SSL when a custom subdomain is provided. - "created_at": "2020-02-20 00:00:00", # Optional. A time value given in - ISO8601 combined date and time format that represents when the CDN endpoint was - created. - "custom_domain": "str", # Optional. The fully qualified domain name (FQDN) - of the custom subdomain used with the CDN endpoint. 
- "endpoint": "str", # Optional. The fully qualified domain name (FQDN) from - which the CDN-backed content is served. - "id": "str", # Optional. A unique ID that can be used to identify and - reference a CDN endpoint. - "ttl": 3600 # Optional. Default value is 3600. The amount of time the - content is cached by the CDN's edge servers in seconds. TTL must be one of 60, - 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when excluded. Known - values are: 60, 600, 3600, 86400, and 604800. - } - - # response body for status code(s): 201 - response == { - "endpoint": { - "origin": "str", # The fully qualified domain name (FQDN) for the - origin server which provides the content for the CDN. This is currently - restricted to a Space. Required. - "certificate_id": "str", # Optional. The ID of a DigitalOcean - managed TLS certificate used for SSL when a custom subdomain is provided. - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the CDN - endpoint was created. - "custom_domain": "str", # Optional. The fully qualified domain name - (FQDN) of the custom subdomain used with the CDN endpoint. - "endpoint": "str", # Optional. The fully qualified domain name - (FQDN) from which the CDN-backed content is served. - "id": "str", # Optional. A unique ID that can be used to identify - and reference a CDN endpoint. - "ttl": 3600 # Optional. Default value is 3600. The amount of time - the content is cached by the CDN's edge servers in seconds. TTL must be one - of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when - excluded. Known values are: 60, 600, 3600, 86400, and 604800. - } - } - """ - - @overload - def create_endpoint( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Create a New CDN Endpoint. - - To create a new CDN endpoint, send a POST request to ``/v2/cdn/endpoints``. 
The - origin attribute must be set to the fully qualified domain name (FQDN) of a - DigitalOcean Space. Optionally, the TTL may be configured by setting the ``ttl`` - attribute. - - A custom subdomain may be configured by specifying the ``custom_domain`` and - ``certificate_id`` attributes. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 201 - response == { - "endpoint": { - "origin": "str", # The fully qualified domain name (FQDN) for the - origin server which provides the content for the CDN. This is currently - restricted to a Space. Required. - "certificate_id": "str", # Optional. The ID of a DigitalOcean - managed TLS certificate used for SSL when a custom subdomain is provided. - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the CDN - endpoint was created. - "custom_domain": "str", # Optional. The fully qualified domain name - (FQDN) of the custom subdomain used with the CDN endpoint. - "endpoint": "str", # Optional. The fully qualified domain name - (FQDN) from which the CDN-backed content is served. - "id": "str", # Optional. A unique ID that can be used to identify - and reference a CDN endpoint. - "ttl": 3600 # Optional. Default value is 3600. The amount of time - the content is cached by the CDN's edge servers in seconds. TTL must be one - of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when - excluded. Known values are: 60, 600, 3600, 86400, and 604800. 
- } - } - """ - - @distributed_trace - def create_endpoint(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: - # pylint: disable=line-too-long - """Create a New CDN Endpoint. - - To create a new CDN endpoint, send a POST request to ``/v2/cdn/endpoints``. The - origin attribute must be set to the fully qualified domain name (FQDN) of a - DigitalOcean Space. Optionally, the TTL may be configured by setting the ``ttl`` - attribute. - - A custom subdomain may be configured by specifying the ``custom_domain`` and - ``certificate_id`` attributes. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "origin": "str", # The fully qualified domain name (FQDN) for the origin - server which provides the content for the CDN. This is currently restricted to a - Space. Required. - "certificate_id": "str", # Optional. The ID of a DigitalOcean managed TLS - certificate used for SSL when a custom subdomain is provided. - "created_at": "2020-02-20 00:00:00", # Optional. A time value given in - ISO8601 combined date and time format that represents when the CDN endpoint was - created. - "custom_domain": "str", # Optional. The fully qualified domain name (FQDN) - of the custom subdomain used with the CDN endpoint. - "endpoint": "str", # Optional. The fully qualified domain name (FQDN) from - which the CDN-backed content is served. - "id": "str", # Optional. A unique ID that can be used to identify and - reference a CDN endpoint. - "ttl": 3600 # Optional. Default value is 3600. The amount of time the - content is cached by the CDN's edge servers in seconds. TTL must be one of 60, - 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when excluded. Known - values are: 60, 600, 3600, 86400, and 604800. 
- } - - # response body for status code(s): 201 - response == { - "endpoint": { - "origin": "str", # The fully qualified domain name (FQDN) for the - origin server which provides the content for the CDN. This is currently - restricted to a Space. Required. - "certificate_id": "str", # Optional. The ID of a DigitalOcean - managed TLS certificate used for SSL when a custom subdomain is provided. - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the CDN - endpoint was created. - "custom_domain": "str", # Optional. The fully qualified domain name - (FQDN) of the custom subdomain used with the CDN endpoint. - "endpoint": "str", # Optional. The fully qualified domain name - (FQDN) from which the CDN-backed content is served. - "id": "str", # Optional. A unique ID that can be used to identify - and reference a CDN endpoint. - "ttl": 3600 # Optional. Default value is 3600. The amount of time - the content is cached by the CDN's edge servers in seconds. TTL must be one - of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when - excluded. Known values are: 60, 600, 3600, 86400, and 604800. 
- } - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[JSON] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body - - _request = build_cdn_create_endpoint_request( - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return 
cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @distributed_trace - def get_endpoint(self, cdn_id: str, **kwargs: Any) -> JSON: - # pylint: disable=line-too-long - """Retrieve an Existing CDN Endpoint. - - To show information about an existing CDN endpoint, send a GET request to - ``/v2/cdn/endpoints/$ENDPOINT_ID``. - - :param cdn_id: A unique identifier for a CDN endpoint. Required. - :type cdn_id: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "endpoint": { - "origin": "str", # The fully qualified domain name (FQDN) for the - origin server which provides the content for the CDN. This is currently - restricted to a Space. Required. - "certificate_id": "str", # Optional. The ID of a DigitalOcean - managed TLS certificate used for SSL when a custom subdomain is provided. - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the CDN - endpoint was created. - "custom_domain": "str", # Optional. The fully qualified domain name - (FQDN) of the custom subdomain used with the CDN endpoint. - "endpoint": "str", # Optional. The fully qualified domain name - (FQDN) from which the CDN-backed content is served. - "id": "str", # Optional. A unique ID that can be used to identify - and reference a CDN endpoint. - "ttl": 3600 # Optional. Default value is 3600. The amount of time - the content is cached by the CDN's edge servers in seconds. TTL must be one - of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when - excluded. Known values are: 60, 600, 3600, 86400, and 604800. - } + "total_items": 0, # Total number of items available across all pages. + Required. + "total_pages": 0 # Total number of pages available. Required. 
} # response body for status code(s): 404 response == { @@ -115167,8 +123707,12 @@ def get_endpoint(self, cdn_id: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_cdn_get_endpoint_request( - cdn_id=cdn_id, + _request = build_billing_insights_list_request( + account_urn=account_urn, + start_date=start_date, + end_date=end_date, + per_page=per_page, + page=page, headers=_headers, params=_params, ) @@ -115227,165 +123771,35 @@ def get_endpoint(self, cdn_id: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore - @overload - def update_endpoints( - self, - cdn_id: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Update a CDN Endpoint. - - To update the TTL, certificate ID, or the FQDN of the custom subdomain for - an existing CDN endpoint, send a PUT request to - ``/v2/cdn/endpoints/$ENDPOINT_ID``. - - :param cdn_id: A unique identifier for a CDN endpoint. Required. - :type cdn_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "certificate_id": "str", # Optional. The ID of a DigitalOcean managed TLS - certificate used for SSL when a custom subdomain is provided. - "custom_domain": "str", # Optional. The fully qualified domain name (FQDN) - of the custom subdomain used with the CDN endpoint. - "ttl": 3600 # Optional. Default value is 3600. The amount of time the - content is cached by the CDN's edge servers in seconds. TTL must be one of 60, - 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when excluded. 
Known - values are: 60, 600, 3600, 86400, and 604800. - } - - # response body for status code(s): 200 - response == { - "endpoint": { - "origin": "str", # The fully qualified domain name (FQDN) for the - origin server which provides the content for the CDN. This is currently - restricted to a Space. Required. - "certificate_id": "str", # Optional. The ID of a DigitalOcean - managed TLS certificate used for SSL when a custom subdomain is provided. - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the CDN - endpoint was created. - "custom_domain": "str", # Optional. The fully qualified domain name - (FQDN) of the custom subdomain used with the CDN endpoint. - "endpoint": "str", # Optional. The fully qualified domain name - (FQDN) from which the CDN-backed content is served. - "id": "str", # Optional. A unique ID that can be used to identify - and reference a CDN endpoint. - "ttl": 3600 # Optional. Default value is 3600. The amount of time - the content is cached by the CDN's edge servers in seconds. TTL must be one - of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when - excluded. Known values are: 60, 600, 3600, 86400, and 604800. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ - - @overload - def update_endpoints( - self, - cdn_id: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Update a CDN Endpoint. - - To update the TTL, certificate ID, or the FQDN of the custom subdomain for - an existing CDN endpoint, send a PUT request to - ``/v2/cdn/endpoints/$ENDPOINT_ID``. - :param cdn_id: A unique identifier for a CDN endpoint. Required. - :type cdn_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: +class DatabasesOperations: # pylint: disable=too-many-public-methods + """ + .. warning:: + **DO NOT** instantiate this class directly. - Example: - .. code-block:: python + Instead, you should access the following operations through + :class:`~pydo.GeneratedClient`'s + :attr:`databases` attribute. + """ - # response body for status code(s): 200 - response == { - "endpoint": { - "origin": "str", # The fully qualified domain name (FQDN) for the - origin server which provides the content for the CDN. This is currently - restricted to a Space. Required. - "certificate_id": "str", # Optional. The ID of a DigitalOcean - managed TLS certificate used for SSL when a custom subdomain is provided. - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the CDN - endpoint was created. - "custom_domain": "str", # Optional. The fully qualified domain name - (FQDN) of the custom subdomain used with the CDN endpoint. - "endpoint": "str", # Optional. The fully qualified domain name - (FQDN) from which the CDN-backed content is served. - "id": "str", # Optional. A unique ID that can be used to identify - and reference a CDN endpoint. 
- "ttl": 3600 # Optional. Default value is 3600. The amount of time - the content is cached by the CDN's edge servers in seconds. TTL must be one - of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when - excluded. Known values are: 60, 600, 3600, 86400, and 604800. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) @distributed_trace - def update_endpoints( - self, cdn_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> JSON: + def list_options(self, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Update a CDN Endpoint. + """List Database Options. - To update the TTL, certificate ID, or the FQDN of the custom subdomain for - an existing CDN endpoint, send a PUT request to - ``/v2/cdn/endpoints/$ENDPOINT_ID``. + To list all of the options available for the offered database engines, send a GET request to + ``/v2/databases/options``. + The result will be a JSON object with an ``options`` key. - :param cdn_id: A unique identifier for a CDN endpoint. Required. 
- :type cdn_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -115393,39 +123807,270 @@ def update_endpoints( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "certificate_id": "str", # Optional. The ID of a DigitalOcean managed TLS - certificate used for SSL when a custom subdomain is provided. - "custom_domain": "str", # Optional. The fully qualified domain name (FQDN) - of the custom subdomain used with the CDN endpoint. - "ttl": 3600 # Optional. Default value is 3600. The amount of time the - content is cached by the CDN's edge servers in seconds. TTL must be one of 60, - 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when excluded. Known - values are: 60, 600, 3600, 86400, and 604800. - } - # response body for status code(s): 200 response == { - "endpoint": { - "origin": "str", # The fully qualified domain name (FQDN) for the - origin server which provides the content for the CDN. This is currently - restricted to a Space. Required. - "certificate_id": "str", # Optional. The ID of a DigitalOcean - managed TLS certificate used for SSL when a custom subdomain is provided. - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the CDN - endpoint was created. - "custom_domain": "str", # Optional. The fully qualified domain name - (FQDN) of the custom subdomain used with the CDN endpoint. - "endpoint": "str", # Optional. The fully qualified domain name - (FQDN) from which the CDN-backed content is served. - "id": "str", # Optional. A unique ID that can be used to identify - and reference a CDN endpoint. - "ttl": 3600 # Optional. Default value is 3600. The amount of time - the content is cached by the CDN's edge servers in seconds. 
TTL must be one - of 60, 600, 3600, 86400, or 604800. Defaults to 3600 (one hour) when - excluded. Known values are: 60, 600, 3600, 86400, and 604800. + "options": { + "kafka": { + "layouts": [ + { + "num_nodes": 0, # Optional. An array of + objects, each indicating the node sizes (otherwise referred to as + slugs) that are available with various numbers of nodes in the + database cluster. Each slugs denotes the node's identifier, CPU, + and RAM (in that order). + "sizes": [ + "str" # Optional. An array of + objects containing the slugs available with various node + counts. + ] + } + ], + "regions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ], + "versions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ] + }, + "mongodb": { + "layouts": [ + { + "num_nodes": 0, # Optional. An array of + objects, each indicating the node sizes (otherwise referred to as + slugs) that are available with various numbers of nodes in the + database cluster. Each slugs denotes the node's identifier, CPU, + and RAM (in that order). + "sizes": [ + "str" # Optional. An array of + objects containing the slugs available with various node + counts. + ] + } + ], + "regions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ], + "versions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ] + }, + "mysql": { + "layouts": [ + { + "num_nodes": 0, # Optional. An array of + objects, each indicating the node sizes (otherwise referred to as + slugs) that are available with various numbers of nodes in the + database cluster. Each slugs denotes the node's identifier, CPU, + and RAM (in that order). + "sizes": [ + "str" # Optional. An array of + objects containing the slugs available with various node + counts. + ] + } + ], + "regions": [ + "str" # Optional. An array of strings containing the + names of available regions. 
+ ], + "versions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ] + }, + "opensearch": { + "layouts": [ + { + "num_nodes": 0, # Optional. An array of + objects, each indicating the node sizes (otherwise referred to as + slugs) that are available with various numbers of nodes in the + database cluster. Each slugs denotes the node's identifier, CPU, + and RAM (in that order). + "sizes": [ + "str" # Optional. An array of + objects containing the slugs available with various node + counts. + ] + } + ], + "regions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ], + "versions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ] + }, + "pg": { + "layouts": [ + { + "num_nodes": 0, # Optional. An array of + objects, each indicating the node sizes (otherwise referred to as + slugs) that are available with various numbers of nodes in the + database cluster. Each slugs denotes the node's identifier, CPU, + and RAM (in that order). + "sizes": [ + "str" # Optional. An array of + objects containing the slugs available with various node + counts. + ] + } + ], + "regions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ], + "versions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ] + }, + "redis": { + "layouts": [ + { + "num_nodes": 0, # Optional. An array of + objects, each indicating the node sizes (otherwise referred to as + slugs) that are available with various numbers of nodes in the + database cluster. Each slugs denotes the node's identifier, CPU, + and RAM (in that order). + "sizes": [ + "str" # Optional. An array of + objects containing the slugs available with various node + counts. + ] + } + ], + "regions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ], + "versions": [ + "str" # Optional. 
An array of strings containing the + names of available regions. + ] + }, + "valkey": { + "layouts": [ + { + "num_nodes": 0, # Optional. An array of + objects, each indicating the node sizes (otherwise referred to as + slugs) that are available with various numbers of nodes in the + database cluster. Each slugs denotes the node's identifier, CPU, + and RAM (in that order). + "sizes": [ + "str" # Optional. An array of + objects containing the slugs available with various node + counts. + ] + } + ], + "regions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ], + "versions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ] + } + }, + "version_availability": { + "kafka": [ + { + "end_of_availability": "str", # Optional. A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. If null, the version + does not have an end of availability timeline. + "end_of_life": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life + timeline. + "version": "str" # Optional. The engine version. + } + ], + "mongodb": [ + { + "end_of_availability": "str", # Optional. A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. If null, the version + does not have an end of availability timeline. + "end_of_life": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life + timeline. + "version": "str" # Optional. The engine version. + } + ], + "mysql": [ + { + "end_of_availability": "str", # Optional. A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. 
If null, the version + does not have an end of availability timeline. + "end_of_life": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life + timeline. + "version": "str" # Optional. The engine version. + } + ], + "opensearch": [ + { + "end_of_availability": "str", # Optional. A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. If null, the version + does not have an end of availability timeline. + "end_of_life": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life + timeline. + "version": "str" # Optional. The engine version. + } + ], + "pg": [ + { + "end_of_availability": "str", # Optional. A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. If null, the version + does not have an end of availability timeline. + "end_of_life": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life + timeline. + "version": "str" # Optional. The engine version. + } + ], + "redis": [ + { + "end_of_availability": "str", # Optional. A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. If null, the version + does not have an end of availability timeline. + "end_of_life": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life + timeline. + "version": "str" # Optional. The engine version. + } + ], + "valkey": [ + { + "end_of_availability": "str", # Optional. 
A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. If null, the version + does not have an end of availability timeline. + "end_of_life": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life + timeline. + "version": "str" # Optional. The engine version. + } + ] } } # response body for status code(s): 404 @@ -115453,27 +124098,12 @@ def update_endpoints( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[JSON] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body - - _request = build_cdn_update_endpoints_request( - cdn_id=cdn_id, - content_type=content_type, - json=_json, - content=_content, + _request = build_databases_list_options_request( headers=_headers, params=_params, ) @@ -115533,25 +124163,358 @@ def update_endpoints( return cast(JSON, deserialized) # type: ignore @distributed_trace - def delete_endpoint(self, cdn_id: str, **kwargs: Any) -> Optional[JSON]: + def list_clusters(self, *, tag_name: Optional[str] = None, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Delete a CDN Endpoint. + """List All Database Clusters. - To delete a specific CDN endpoint, send a DELETE request to - ``/v2/cdn/endpoints/$ENDPOINT_ID``. + To list all of the database clusters available on your account, send a GET request to + ``/v2/databases``. To limit the results to database clusters with a specific tag, include the + ``tag_name`` query parameter set to the name of the tag. 
For example, + ``/v2/databases?tag_name=$TAG_NAME``. - A status of 204 will be given. This indicates that the request was processed - successfully, but that no response body is needed. + The result will be a JSON object with a ``databases`` key. This will be set to an array of + database objects, each of which will contain the standard database attributes. - :param cdn_id: A unique identifier for a CDN endpoint. Required. - :type cdn_id: str - :return: JSON object or None - :rtype: JSON or None + The embedded ``connection`` and ``private_connection`` objects will contain the information + needed to access the database cluster. For multi-node clusters, the ``standby_connection`` and + ``standby_private_connection`` objects will contain the information needed to connect to the + cluster's standby node(s). + + The embedded ``maintenance_window`` object will contain information about any scheduled + maintenance for the database cluster. + + :keyword tag_name: Limits the results to database clusters with a specific + tag.:code:`
`:code:`
`Requires ``tag:read`` scope. Default value is None. + :paramtype tag_name: str + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 200 + response == { + "databases": [ + { + "engine": "str", # A slug representing the database engine + used for the cluster. The possible values are: "pg" for PostgreSQL, + "mysql" for MySQL, "redis" for Caching, "mongodb" for MongoDB, "kafka" + for Kafka, "opensearch" for OpenSearch, and "valkey" for Valkey. + Required. Known values are: "pg", "mysql", "redis", "valkey", "mongodb", + "kafka", and "opensearch". + "name": "str", # A unique, human-readable name referring to + a database cluster. Required. + "num_nodes": 0, # The number of nodes in the database + cluster. Required. + "region": "str", # The slug identifier for the region where + the database cluster is located. Required. + "size": "str", # The slug identifier representing the size + of the nodes in the database cluster. Required. + "connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the database cluster was created. + "db_names": [ + "str" # Optional. An array of strings containing the + names of databases created in the database cluster. + ], + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs + for the database cluster. Each CNAME must be a valid RFC 1123 + hostname (e.g., "db.example.com"). Maximum of 16 CNAMEs allowed, + each up to 253 characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to + identify and reference a database cluster. + "maintenance_window": { + "day": "str", # The day of the week on which to + apply maintenance updates. Required. + "hour": "str", # The hour in UTC at which + maintenance updates will be applied in 24 hour format. Required. + "description": [ + "str" # Optional. A list of strings, each + containing information about a pending maintenance update. + ], + "pending": bool # Optional. A boolean value + indicating whether any maintenance is scheduled to be performed in + the next window. + }, + "metrics_endpoints": [ + { + "host": "str", # Optional. A FQDN pointing + to the database cluster's node(s). + "port": 0 # Optional. The port on which a + service is listening. + } + ], + "private_connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "private_network_uuid": "str", # Optional. A string + specifying the UUID of the VPC to which the database cluster will be + assigned. If excluded, the cluster when creating a new database cluster, + it will be assigned to your account's default VPC for the region. + :code:`
`:code:`
`Requires ``vpc:read`` scope. + "project_id": "str", # Optional. The ID of the project that + the database cluster is assigned to. If excluded when creating a new + database cluster, it will be assigned to your default + project.:code:`
`:code:`
`Requires ``project:read`` scope. + "rules": [ + { + "type": "str", # The type of resource that + the firewall rule allows to access the database cluster. + Required. Known values are: "droplet", "k8s", "ip_addr", "tag", + and "app". + "value": "str", # The ID of the specific + resource, the name of a tag applied to a group of resources, or + the IP address that the firewall rule allows to access the + database cluster. Required. + "cluster_uuid": "str", # Optional. A unique + ID for the database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # + Optional. A time value given in ISO8601 combined date and time + format that represents when the firewall rule was created. + "description": "str", # Optional. A + human-readable description of the rule. + "uuid": "str" # Optional. A unique ID for + the firewall rule itself. + } + ], + "schema_registry_connection": { + "host": "str", # Optional. The FQDN pointing to the + schema registry connection uri. + "password": "str", # Optional. The randomly + generated password for the schema + registry.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the schema + registry is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + schema registry.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "semantic_version": "str", # Optional. A string representing + the semantic version of the database engine in use for the cluster. + "standby_connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "status": "str", # Optional. A string representing the + current status of the database cluster. Known values are: "creating", + "online", "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added + to the cluster, in MiB. If null, no additional storage is added to the + cluster, beyond what is provided as a base amount from the 'size' and any + previously added additional storage. + "tags": [ + "str" # Optional. An array of tags that have been + applied to the database cluster. :code:`
`:code:`
`Requires + ``tag:read`` scope. + ], + "ui_connection": { + "host": "str", # Optional. The FQDN pointing to the + opensearch cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + opensearch dashboard is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + opensearch dashboard.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "users": [ + { + "name": "str", # The name of a database + user. Required. + "access_cert": "str", # Optional. Access + certificate for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key + for TLS client authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string + specifying the authentication method to be used for + connections to the MySQL user account. The valid values are + ``mysql_native_password`` or ``caching_sha2_password``. If + excluded when creating a new user, the default for the + version of MySQL in use will be used. As of MySQL 8.0, the + default is ``caching_sha2_password``. Required. Known values + are: "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly + generated password for the database user.:code:`
`Requires + ``database:view_credentials`` scope. + "role": "str", # Optional. A string + representing the database user's role. The value will be either + "primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", + # Permission set applied to the ACL. 'consume' allows + for messages to be consumed from the topic. 'produce' + allows for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for + 'produceconsume' as well as any operations to + administer the topic (delete, update). Required. + Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A + regex for matching the topic(s) that this ACL should + apply to. Required. + "id": "str" # + Optional. An identifier for the ACL. Will be computed + after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A + list of databases to which the user should have + access. When the database is set to ``admin``"" , the + user will have access to all databases based on the + user's role i.e. a user with the role ``readOnly`` + assigned to the ``admin`` database will have read + access to all databases. + ], + "role": "str" # Optional. + The role to assign to the user with each role mapping to + a MongoDB built-in role. ``readOnly`` maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and + "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # + Optional. A regex for matching the indexes that this + ACL should apply to. + "permission": "str" + # Optional. Permission set applied to the ACL. 'read' + allows user to read from the index. 'write' allows + for user to write to the index. 'readwrite' allows + for both 'read' and 'write' permission. 
+ 'deny'(default) restricts user from performing any + operation over an index. 'admin' allows for + 'readwrite' as well as any operations to administer + the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # + Optional. For Postgres clusters, set to ``true`` for a user + with replication rights. This option is not currently + supported for other database engines. + } + } + ], + "version": "str", # Optional. A string representing the + version of the database engine in use for the cluster. + "version_end_of_availability": "str", # Optional. A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. If null, the version does + not have an end of availability timeline. + "version_end_of_life": "str" # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life timeline. 
+ } + ] + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -115580,10 +124543,10 @@ def delete_endpoint(self, cdn_id: str, **kwargs: Any) -> Optional[JSON]: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_cdn_delete_endpoint_request( - cdn_id=cdn_id, + _request = build_databases_list_clusters_request( + tag_name=tag_name, headers=_headers, params=_params, ) @@ -115598,15 +124561,14 @@ def delete_endpoint(self, cdn_id: str, **kwargs: Any) -> Optional[JSON]: response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -115617,6 +124579,11 @@ def delete_endpoint(self, cdn_id: str, **kwargs: Any) -> Optional[JSON]: "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -115634,41 +124601,49 @@ def delete_endpoint(self, cdn_id: str, **kwargs: Any) -> Optional[JSON]: deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, 
deserialized) # type: ignore @overload - def purge_cache( - self, - cdn_id: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> Optional[JSON]: + def create_cluster( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Purge the Cache for an Existing CDN Endpoint. + """Create a New Database Cluster. - To purge cached content from a CDN endpoint, send a DELETE request to - ``/v2/cdn/endpoints/$ENDPOINT_ID/cache``. The body of the request should include - a ``files`` attribute containing a list of cached file paths to be purged. A - path may be for a single file or may contain a wildcard (\\ ``*``\\ ) to recursively - purge all files under a directory. When only a wildcard is provided, all cached - files will be purged. There is a rate limit of 50 files per 20 seconds that can - be purged. CDN endpoints have a rate limit of 5 requests per 10 seconds. - Purging files using a wildcard path counts as a single request against the API's - rate limit. Two identical purge requests cannot be sent at the same time. + To create a database cluster, send a POST request to ``/v2/databases``. To see a list of + options for each engine, such as available regions, size slugs, and versions, send a GET + request to the ``/v2/databases/options`` endpoint. The available sizes for the + ``storage_size_mib`` field depends on the cluster's size. To see a list of available sizes, see + `Managed Database Pricing `_. + + The create response returns a JSON object with a key called ``database``. The value of this is + an object that contains the standard attributes associated with a database cluster. The initial + value of the database cluster's ``status`` attribute is ``creating``. When the cluster is ready + to receive traffic, this changes to ``online``. 
+ + The embedded ``connection`` and ``private_connection`` objects contains the information needed + to access the database cluster. For multi-node clusters, the ``standby_connection`` and + ``standby_private_connection`` objects contain the information needed to connect to the + cluster's standby node(s). + + DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To + create a new database cluster based on a backup of an existing cluster, send a POST request to + ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must + include a key named ``backup_restore`` with the name of the original database cluster and the + timestamp of the backup to be restored. Creating a database from a backup is the same as + forking a database in the control panel. + Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. Backups are + also not supported for Caching or Valkey clusters. - :param cdn_id: A unique identifier for a CDN endpoint. Required. - :type cdn_id: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -115676,12 +124651,611 @@ def purge_cache( # JSON input template you can fill out and use as your body input. body = { - "files": [ - "str" # An array of strings containing the path to the content to be - purged from the CDN cache. Required. - ] + "engine": "str", # A slug representing the database engine used for the + cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, "redis" + for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" for + OpenSearch, and "valkey" for Valkey. Required. 
Known values are: "pg", "mysql", + "redis", "valkey", "mongodb", "kafka", and "opensearch". + "name": "str", # A unique, human-readable name referring to a database + cluster. Required. + "num_nodes": 0, # The number of nodes in the database cluster. Required. + "region": "str", # The slug identifier for the region where the database + cluster is located. Required. + "size": "str", # The slug identifier representing the size of the nodes in + the database cluster. Required. + "autoscale": { + "storage": { + "enabled": bool, # Whether storage autoscaling is enabled + for the cluster. Required. + "increment_gib": 0, # Optional. The amount of additional + storage to add (in GiB) when autoscaling is triggered. + "threshold_percent": 0 # Optional. The storage usage + threshold percentage that triggers autoscaling. When storage usage + exceeds this percentage, additional storage will be added automatically. + } + }, + "backup_restore": { + "database_name": "str", # The name of an existing database cluster + from which the backup will be restored. Required. + "backup_created_at": "2020-02-20 00:00:00" # Optional. The timestamp + of an existing database cluster backup in ISO8601 combined date and time + format. The most recent backup will be used if excluded. + }, + "connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given in + ISO8601 combined date and time format that represents when the database cluster + was created. + "db_names": [ + "str" # Optional. An array of strings containing the names of + databases created in the database cluster. + ], + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the database + cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify and + reference a database cluster. + "maintenance_window": { + "day": "str", # The day of the week on which to apply maintenance + updates. Required. + "hour": "str", # The hour in UTC at which maintenance updates will + be applied in 24 hour format. Required. + "description": [ + "str" # Optional. A list of strings, each containing + information about a pending maintenance update. + ], + "pending": bool # Optional. A boolean value indicating whether any + maintenance is scheduled to be performed in the next window. + }, + "metrics_endpoints": [ + { + "host": "str", # Optional. A FQDN pointing to the database + cluster's node(s). + "port": 0 # Optional. The port on which a service is + listening. + } + ], + "private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the UUID of + the VPC to which the database cluster will be assigned. If excluded, the cluster + when creating a new database cluster, it will be assigned to your account's + default VPC for the region. :code:`
`:code:`
`Requires ``vpc:read`` scope. + "project_id": "str", # Optional. The ID of the project that the database + cluster is assigned to. If excluded when creating a new database cluster, it will + be assigned to your default project.:code:`
`:code:`
`Requires + ``project:update`` scope. + "rules": [ + { + "type": "str", # The type of resource that the firewall rule + allows to access the database cluster. Required. Known values are: + "droplet", "k8s", "ip_addr", "tag", and "app". + "value": "str", # The ID of the specific resource, the name + of a tag applied to a group of resources, or the IP address that the + firewall rule allows to access the database cluster. Required. + "cluster_uuid": "str", # Optional. A unique ID for the + database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the firewall rule was created. + "description": "str", # Optional. A human-readable + description of the rule. + "uuid": "str" # Optional. A unique ID for the firewall rule + itself. + } + ], + "schema_registry_connection": { + "host": "str", # Optional. The FQDN pointing to the schema registry + connection uri. + "password": "str", # Optional. The randomly generated password for + the schema registry.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the schema registry is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the schema + registry.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "semantic_version": "str", # Optional. A string representing the semantic + version of the database engine in use for the cluster. + "standby_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "status": "str", # Optional. A string representing the current status of the + database cluster. Known values are: "creating", "online", "resizing", + "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the cluster, + in MiB. If null, no additional storage is added to the cluster, beyond what is + provided as a base amount from the 'size' and any previously added additional + storage. + "tags": [ + "str" # Optional. An array of tags (as strings) to apply to the + database cluster. :code:`
`:code:`
`Requires ``tag:create`` scope. + ], + "ui_connection": { + "host": "str", # Optional. The FQDN pointing to the opensearch + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the opensearch dashboard is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the opensearch + dashboard.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "users": [ + { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS + client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL + 8.0, the default is ``caching_sha2_password``. Required. Known values + are: "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password + for the database user.:code:`
`Requires ``database:view_credentials`` + scope. + "role": "str", # Optional. A string representing the + database user's role. The value will be either "primary" or "normal". + Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission + set applied to the ACL. 'consume' allows for messages to be + consumed from the topic. 'produce' allows for messages to be + published to the topic. 'produceconsume' allows for both + 'consume' and 'produce' permission. 'admin' allows for + 'produceconsume' as well as any operations to administer the + topic (delete, update). Required. Known values are: "admin", + "consume", "produce", and "produceconsume". + "topic": "str", # A regex for + matching the topic(s) that this ACL should apply to. + Required. + "id": "str" # Optional. An + identifier for the ACL. Will be computed after the ACL is + created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of + databases to which the user should have access. When the + database is set to ``admin``"" , the user will have access to + all databases based on the user's role i.e. a user with the + role ``readOnly`` assigned to the ``admin`` database will + have read access to all databases. + ], + "role": "str" # Optional. The role to assign + to the user with each role mapping to a MongoDB built-in role. + ``readOnly`` maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex + for matching the indexes that this ACL should apply to. + "permission": "str" # Optional. + Permission set applied to the ACL. 'read' allows user to read + from the index. 'write' allows for user to write to the + index. 'readwrite' allows for both 'read' and 'write' + permission. 
'deny'(default) restricts user from performing + any operation over an index. 'admin' allows for 'readwrite' + as well as any operations to administer the index. Known + values are: "deny", "admin", "read", "readwrite", and + "write". + } + ], + "pg_allow_replication": bool # Optional. For + Postgres clusters, set to ``true`` for a user with replication + rights. This option is not currently supported for other database + engines. + } + } + ], + "version": "str", # Optional. A string representing the version of the + database engine in use for the cluster. + "version_end_of_availability": "str", # Optional. A timestamp referring to + the date when the particular version will no longer be available for creating new + clusters. If null, the version does not have an end of availability timeline. + "version_end_of_life": "str" # Optional. A timestamp referring to the date + when the particular version will no longer be supported. If null, the version + does not have an end of life timeline. + } + + # response body for status code(s): 201 + response == { + "database": { + "engine": "str", # A slug representing the database engine used for + the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, + "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" + for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", + "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch". + "name": "str", # A unique, human-readable name referring to a + database cluster. Required. + "num_nodes": 0, # The number of nodes in the database cluster. + Required. + "region": "str", # The slug identifier for the region where the + database cluster is located. Required. + "size": "str", # The slug identifier representing the size of the + nodes in the database cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. 
The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "db_names": [ + "str" # Optional. An array of strings containing the names + of databases created in the database cluster. + ], + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the + database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database cluster. + "maintenance_window": { + "day": "str", # The day of the week on which to apply + maintenance updates. Required. + "hour": "str", # The hour in UTC at which maintenance + updates will be applied in 24 hour format. Required. + "description": [ + "str" # Optional. A list of strings, each containing + information about a pending maintenance update. + ], + "pending": bool # Optional. A boolean value indicating + whether any maintenance is scheduled to be performed in the next window. + }, + "metrics_endpoints": [ + { + "host": "str", # Optional. A FQDN pointing to the + database cluster's node(s). + "port": 0 # Optional. The port on which a service is + listening. + } + ], + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the database cluster will be assigned. If excluded, + the cluster when creating a new database cluster, it will be assigned to your + account's default VPC for the region. :code:`
`:code:`
`Requires + ``vpc:read`` scope. + "project_id": "str", # Optional. The ID of the project that the + database cluster is assigned to. If excluded when creating a new database + cluster, it will be assigned to your default + project.:code:`
`:code:`
`Requires ``project:read`` scope. + "rules": [ + { + "type": "str", # The type of resource that the + firewall rule allows to access the database cluster. Required. Known + values are: "droplet", "k8s", "ip_addr", "tag", and "app". + "value": "str", # The ID of the specific resource, + the name of a tag applied to a group of resources, or the IP address + that the firewall rule allows to access the database cluster. + Required. + "cluster_uuid": "str", # Optional. A unique ID for + the database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A + time value given in ISO8601 combined date and time format that + represents when the firewall rule was created. + "description": "str", # Optional. A human-readable + description of the rule. + "uuid": "str" # Optional. A unique ID for the + firewall rule itself. + } + ], + "schema_registry_connection": { + "host": "str", # Optional. The FQDN pointing to the schema + registry connection uri. + "password": "str", # Optional. The randomly generated + password for the schema registry.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the schema registry + is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience + and should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the schema + registry.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "semantic_version": "str", # Optional. A string representing the + semantic version of the database engine in use for the cluster. + "standby_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the + cluster, in MiB. If null, no additional storage is added to the cluster, + beyond what is provided as a base amount from the 'size' and any previously + added additional storage. + "tags": [ + "str" # Optional. An array of tags that have been applied to + the database cluster. :code:`
`:code:`
`Requires ``tag:read`` + scope. + ], + "ui_connection": { + "host": "str", # Optional. The FQDN pointing to the + opensearch cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the opensearch + dashboard is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience + and should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + opensearch dashboard.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "users": [ + { + "name": "str", # The name of a database user. + Required. + "access_cert": "str", # Optional. Access certificate + for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS + client authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying + the authentication method to be used for connections to the MySQL + user account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, + the default for the version of MySQL in use will be used. As of + MySQL 8.0, the default is ``caching_sha2_password``. Required. + Known values are: "mysql_native_password" and + "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated + password for the database user.:code:`
`Requires + ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the + database user's role. The value will be either "primary" or "normal". + Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # + Permission set applied to the ACL. 'consume' allows for + messages to be consumed from the topic. 'produce' allows + for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' + permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, + update). Required. Known values are: "admin", "consume", + "produce", and "produceconsume". + "topic": "str", # A regex + for matching the topic(s) that this ACL should apply to. + Required. + "id": "str" # Optional. An + identifier for the ACL. Will be computed after the ACL is + created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of + databases to which the user should have access. When the + database is set to ``admin``"" , the user will have + access to all databases based on the user's role i.e. a + user with the role ``readOnly`` assigned to the ``admin`` + database will have read access to all databases. + ], + "role": "str" # Optional. The role + to assign to the user with each role mapping to a MongoDB + built-in role. ``readOnly`` maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and + "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. + A regex for matching the indexes that this ACL should + apply to. + "permission": "str" # + Optional. Permission set applied to the ACL. 'read' + allows user to read from the index. 'write' allows for + user to write to the index. 'readwrite' allows for both + 'read' and 'write' permission. 
'deny'(default) restricts + user from performing any operation over an index. 'admin' + allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", + "read", "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For + Postgres clusters, set to ``true`` for a user with replication + rights. This option is not currently supported for other database + engines. + } + } + ], + "version": "str", # Optional. A string representing the version of + the database engine in use for the cluster. + "version_end_of_availability": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be available + for creating new clusters. If null, the version does not have an end of + availability timeline. + "version_end_of_life": "str" # Optional. A timestamp referring to + the date when the particular version will no longer be supported. If null, + the version does not have an end of life timeline. + } } - # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -115696,41 +125270,351 @@ def purge_cache( """ @overload - def purge_cache( - self, - cdn_id: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> Optional[JSON]: + def create_cluster( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Purge the Cache for an Existing CDN Endpoint. + """Create a New Database Cluster. - To purge cached content from a CDN endpoint, send a DELETE request to - ``/v2/cdn/endpoints/$ENDPOINT_ID/cache``. The body of the request should include - a ``files`` attribute containing a list of cached file paths to be purged. A - path may be for a single file or may contain a wildcard (\\ ``*``\\ ) to recursively - purge all files under a directory. 
When only a wildcard is provided, all cached - files will be purged. There is a rate limit of 50 files per 20 seconds that can - be purged. CDN endpoints have a rate limit of 5 requests per 10 seconds. - Purging files using a wildcard path counts as a single request against the API's - rate limit. Two identical purge requests cannot be sent at the same time. + To create a database cluster, send a POST request to ``/v2/databases``. To see a list of + options for each engine, such as available regions, size slugs, and versions, send a GET + request to the ``/v2/databases/options`` endpoint. The available sizes for the + ``storage_size_mib`` field depends on the cluster's size. To see a list of available sizes, see + `Managed Database Pricing `_. + + The create response returns a JSON object with a key called ``database``. The value of this is + an object that contains the standard attributes associated with a database cluster. The initial + value of the database cluster's ``status`` attribute is ``creating``. When the cluster is ready + to receive traffic, this changes to ``online``. + + The embedded ``connection`` and ``private_connection`` objects contains the information needed + to access the database cluster. For multi-node clusters, the ``standby_connection`` and + ``standby_private_connection`` objects contain the information needed to connect to the + cluster's standby node(s). + + DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To + create a new database cluster based on a backup of an existing cluster, send a POST request to + ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must + include a key named ``backup_restore`` with the name of the original database cluster and the + timestamp of the backup to be restored. Creating a database from a backup is the same as + forking a database in the control panel. 
+ Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. Backups are + also not supported for Caching or Valkey clusters. - :param cdn_id: A unique identifier for a CDN endpoint. Required. - :type cdn_id: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 201 + response == { + "database": { + "engine": "str", # A slug representing the database engine used for + the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, + "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" + for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", + "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch". + "name": "str", # A unique, human-readable name referring to a + database cluster. Required. + "num_nodes": 0, # The number of nodes in the database cluster. + Required. + "region": "str", # The slug identifier for the region where the + database cluster is located. Required. + "size": "str", # The slug identifier representing the size of the + nodes in the database cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "db_names": [ + "str" # Optional. An array of strings containing the names + of databases created in the database cluster. + ], + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the + database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database cluster. + "maintenance_window": { + "day": "str", # The day of the week on which to apply + maintenance updates. Required. + "hour": "str", # The hour in UTC at which maintenance + updates will be applied in 24 hour format. Required. + "description": [ + "str" # Optional. A list of strings, each containing + information about a pending maintenance update. + ], + "pending": bool # Optional. A boolean value indicating + whether any maintenance is scheduled to be performed in the next window. + }, + "metrics_endpoints": [ + { + "host": "str", # Optional. A FQDN pointing to the + database cluster's node(s). + "port": 0 # Optional. The port on which a service is + listening. + } + ], + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the database cluster will be assigned. If excluded, + the cluster when creating a new database cluster, it will be assigned to your + account's default VPC for the region. :code:`
`:code:`
`Requires + ``vpc:read`` scope. + "project_id": "str", # Optional. The ID of the project that the + database cluster is assigned to. If excluded when creating a new database + cluster, it will be assigned to your default + project.:code:`
`:code:`
`Requires ``project:read`` scope. + "rules": [ + { + "type": "str", # The type of resource that the + firewall rule allows to access the database cluster. Required. Known + values are: "droplet", "k8s", "ip_addr", "tag", and "app". + "value": "str", # The ID of the specific resource, + the name of a tag applied to a group of resources, or the IP address + that the firewall rule allows to access the database cluster. + Required. + "cluster_uuid": "str", # Optional. A unique ID for + the database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A + time value given in ISO8601 combined date and time format that + represents when the firewall rule was created. + "description": "str", # Optional. A human-readable + description of the rule. + "uuid": "str" # Optional. A unique ID for the + firewall rule itself. + } + ], + "schema_registry_connection": { + "host": "str", # Optional. The FQDN pointing to the schema + registry connection uri. + "password": "str", # Optional. The randomly generated + password for the schema registry.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the schema registry + is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience + and should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the schema + registry.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "semantic_version": "str", # Optional. A string representing the + semantic version of the database engine in use for the cluster. + "standby_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the + cluster, in MiB. If null, no additional storage is added to the cluster, + beyond what is provided as a base amount from the 'size' and any previously + added additional storage. + "tags": [ + "str" # Optional. An array of tags that have been applied to + the database cluster. :code:`
`:code:`
`Requires ``tag:read`` + scope. + ], + "ui_connection": { + "host": "str", # Optional. The FQDN pointing to the + opensearch cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the opensearch + dashboard is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience + and should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + opensearch dashboard.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "users": [ + { + "name": "str", # The name of a database user. + Required. + "access_cert": "str", # Optional. Access certificate + for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS + client authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying + the authentication method to be used for connections to the MySQL + user account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, + the default for the version of MySQL in use will be used. As of + MySQL 8.0, the default is ``caching_sha2_password``. Required. + Known values are: "mysql_native_password" and + "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated + password for the database user.:code:`
`Requires + ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the + database user's role. The value will be either "primary" or "normal". + Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # + Permission set applied to the ACL. 'consume' allows for + messages to be consumed from the topic. 'produce' allows + for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' + permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, + update). Required. Known values are: "admin", "consume", + "produce", and "produceconsume". + "topic": "str", # A regex + for matching the topic(s) that this ACL should apply to. + Required. + "id": "str" # Optional. An + identifier for the ACL. Will be computed after the ACL is + created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of + databases to which the user should have access. When the + database is set to ``admin``"" , the user will have + access to all databases based on the user's role i.e. a + user with the role ``readOnly`` assigned to the ``admin`` + database will have read access to all databases. + ], + "role": "str" # Optional. The role + to assign to the user with each role mapping to a MongoDB + built-in role. ``readOnly`` maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and + "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. + A regex for matching the indexes that this ACL should + apply to. + "permission": "str" # + Optional. Permission set applied to the ACL. 'read' + allows user to read from the index. 'write' allows for + user to write to the index. 'readwrite' allows for both + 'read' and 'write' permission. 
'deny'(default) restricts + user from performing any operation over an index. 'admin' + allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", + "read", "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For + Postgres clusters, set to ``true`` for a user with replication + rights. This option is not currently supported for other database + engines. + } + } + ], + "version": "str", # Optional. A string representing the version of + the database engine in use for the cluster. + "version_end_of_availability": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be available + for creating new clusters. If null, the version does not have an end of + availability timeline. + "version_end_of_life": "str" # Optional. A timestamp referring to + the date when the particular version will no longer be supported. If null, + the version does not have an end of life timeline. + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -115745,28 +125629,39 @@ def purge_cache( """ @distributed_trace - def purge_cache( - self, cdn_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: + def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Purge the Cache for an Existing CDN Endpoint. + """Create a New Database Cluster. - To purge cached content from a CDN endpoint, send a DELETE request to - ``/v2/cdn/endpoints/$ENDPOINT_ID/cache``. The body of the request should include - a ``files`` attribute containing a list of cached file paths to be purged. A - path may be for a single file or may contain a wildcard (\\ ``*``\\ ) to recursively - purge all files under a directory. When only a wildcard is provided, all cached - files will be purged. 
There is a rate limit of 50 files per 20 seconds that can - be purged. CDN endpoints have a rate limit of 5 requests per 10 seconds. - Purging files using a wildcard path counts as a single request against the API's - rate limit. Two identical purge requests cannot be sent at the same time. + To create a database cluster, send a POST request to ``/v2/databases``. To see a list of + options for each engine, such as available regions, size slugs, and versions, send a GET + request to the ``/v2/databases/options`` endpoint. The available sizes for the + ``storage_size_mib`` field depends on the cluster's size. To see a list of available sizes, see + `Managed Database Pricing `_. + + The create response returns a JSON object with a key called ``database``. The value of this is + an object that contains the standard attributes associated with a database cluster. The initial + value of the database cluster's ``status`` attribute is ``creating``. When the cluster is ready + to receive traffic, this changes to ``online``. + + The embedded ``connection`` and ``private_connection`` objects contains the information needed + to access the database cluster. For multi-node clusters, the ``standby_connection`` and + ``standby_private_connection`` objects contain the information needed to connect to the + cluster's standby node(s). + + DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To + create a new database cluster based on a backup of an existing cluster, send a POST request to + ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must + include a key named ``backup_restore`` with the name of the original database cluster and the + timestamp of the backup to be restored. Creating a database from a backup is the same as + forking a database in the control panel. + Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. 
Backups are + also not supported for Caching or Valkey clusters. - :param cdn_id: A unique identifier for a CDN endpoint. Required. - :type cdn_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -115774,12 +125669,611 @@ def purge_cache( # JSON input template you can fill out and use as your body input. body = { - "files": [ - "str" # An array of strings containing the path to the content to be - purged from the CDN cache. Required. - ] + "engine": "str", # A slug representing the database engine used for the + cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, "redis" + for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" for + OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", "mysql", + "redis", "valkey", "mongodb", "kafka", and "opensearch". + "name": "str", # A unique, human-readable name referring to a database + cluster. Required. + "num_nodes": 0, # The number of nodes in the database cluster. Required. + "region": "str", # The slug identifier for the region where the database + cluster is located. Required. + "size": "str", # The slug identifier representing the size of the nodes in + the database cluster. Required. + "autoscale": { + "storage": { + "enabled": bool, # Whether storage autoscaling is enabled + for the cluster. Required. + "increment_gib": 0, # Optional. The amount of additional + storage to add (in GiB) when autoscaling is triggered. + "threshold_percent": 0 # Optional. The storage usage + threshold percentage that triggers autoscaling. When storage usage + exceeds this percentage, additional storage will be added automatically. + } + }, + "backup_restore": { + "database_name": "str", # The name of an existing database cluster + from which the backup will be restored. Required. 
+ "backup_created_at": "2020-02-20 00:00:00" # Optional. The timestamp + of an existing database cluster backup in ISO8601 combined date and time + format. The most recent backup will be used if excluded. + }, + "connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given in + ISO8601 combined date and time format that represents when the database cluster + was created. + "db_names": [ + "str" # Optional. An array of strings containing the names of + databases created in the database cluster. + ], + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the database + cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify and + reference a database cluster. + "maintenance_window": { + "day": "str", # The day of the week on which to apply maintenance + updates. Required. + "hour": "str", # The hour in UTC at which maintenance updates will + be applied in 24 hour format. Required. + "description": [ + "str" # Optional. A list of strings, each containing + information about a pending maintenance update. + ], + "pending": bool # Optional. A boolean value indicating whether any + maintenance is scheduled to be performed in the next window. + }, + "metrics_endpoints": [ + { + "host": "str", # Optional. A FQDN pointing to the database + cluster's node(s). + "port": 0 # Optional. The port on which a service is + listening. + } + ], + "private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the UUID of + the VPC to which the database cluster will be assigned. If excluded, the cluster + when creating a new database cluster, it will be assigned to your account's + default VPC for the region. :code:`
`:code:`
`Requires ``vpc:read`` scope. + "project_id": "str", # Optional. The ID of the project that the database + cluster is assigned to. If excluded when creating a new database cluster, it will + be assigned to your default project.:code:`
`:code:`
`Requires + ``project:update`` scope. + "rules": [ + { + "type": "str", # The type of resource that the firewall rule + allows to access the database cluster. Required. Known values are: + "droplet", "k8s", "ip_addr", "tag", and "app". + "value": "str", # The ID of the specific resource, the name + of a tag applied to a group of resources, or the IP address that the + firewall rule allows to access the database cluster. Required. + "cluster_uuid": "str", # Optional. A unique ID for the + database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the firewall rule was created. + "description": "str", # Optional. A human-readable + description of the rule. + "uuid": "str" # Optional. A unique ID for the firewall rule + itself. + } + ], + "schema_registry_connection": { + "host": "str", # Optional. The FQDN pointing to the schema registry + connection uri. + "password": "str", # Optional. The randomly generated password for + the schema registry.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the schema registry is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the schema + registry.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "semantic_version": "str", # Optional. A string representing the semantic + version of the database engine in use for the cluster. + "standby_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "status": "str", # Optional. A string representing the current status of the + database cluster. Known values are: "creating", "online", "resizing", + "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the cluster, + in MiB. If null, no additional storage is added to the cluster, beyond what is + provided as a base amount from the 'size' and any previously added additional + storage. + "tags": [ + "str" # Optional. An array of tags (as strings) to apply to the + database cluster. :code:`
`:code:`
`Requires ``tag:create`` scope. + ], + "ui_connection": { + "host": "str", # Optional. The FQDN pointing to the opensearch + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the opensearch dashboard is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the opensearch + dashboard.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "users": [ + { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS + client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL + 8.0, the default is ``caching_sha2_password``. Required. Known values + are: "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password + for the database user.:code:`
`Requires ``database:view_credentials`` + scope. + "role": "str", # Optional. A string representing the + database user's role. The value will be either "primary" or "normal". + Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission + set applied to the ACL. 'consume' allows for messages to be + consumed from the topic. 'produce' allows for messages to be + published to the topic. 'produceconsume' allows for both + 'consume' and 'produce' permission. 'admin' allows for + 'produceconsume' as well as any operations to administer the + topic (delete, update). Required. Known values are: "admin", + "consume", "produce", and "produceconsume". + "topic": "str", # A regex for + matching the topic(s) that this ACL should apply to. + Required. + "id": "str" # Optional. An + identifier for the ACL. Will be computed after the ACL is + created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of + databases to which the user should have access. When the + database is set to ``admin``"" , the user will have access to + all databases based on the user's role i.e. a user with the + role ``readOnly`` assigned to the ``admin`` database will + have read access to all databases. + ], + "role": "str" # Optional. The role to assign + to the user with each role mapping to a MongoDB built-in role. + ``readOnly`` maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex + for matching the indexes that this ACL should apply to. + "permission": "str" # Optional. + Permission set applied to the ACL. 'read' allows user to read + from the index. 'write' allows for user to write to the + index. 'readwrite' allows for both 'read' and 'write' + permission. 
'deny'(default) restricts user from performing + any operation over an index. 'admin' allows for 'readwrite' + as well as any operations to administer the index. Known + values are: "deny", "admin", "read", "readwrite", and + "write". + } + ], + "pg_allow_replication": bool # Optional. For + Postgres clusters, set to ``true`` for a user with replication + rights. This option is not currently supported for other database + engines. + } + } + ], + "version": "str", # Optional. A string representing the version of the + database engine in use for the cluster. + "version_end_of_availability": "str", # Optional. A timestamp referring to + the date when the particular version will no longer be available for creating new + clusters. If null, the version does not have an end of availability timeline. + "version_end_of_life": "str" # Optional. A timestamp referring to the date + when the particular version will no longer be supported. If null, the version + does not have an end of life timeline. + } + + # response body for status code(s): 201 + response == { + "database": { + "engine": "str", # A slug representing the database engine used for + the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, + "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" + for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", + "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch". + "name": "str", # A unique, human-readable name referring to a + database cluster. Required. + "num_nodes": 0, # The number of nodes in the database cluster. + Required. + "region": "str", # The slug identifier for the region where the + database cluster is located. Required. + "size": "str", # The slug identifier representing the size of the + nodes in the database cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. 
The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "db_names": [ + "str" # Optional. An array of strings containing the names + of databases created in the database cluster. + ], + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the + database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database cluster. + "maintenance_window": { + "day": "str", # The day of the week on which to apply + maintenance updates. Required. + "hour": "str", # The hour in UTC at which maintenance + updates will be applied in 24 hour format. Required. + "description": [ + "str" # Optional. A list of strings, each containing + information about a pending maintenance update. + ], + "pending": bool # Optional. A boolean value indicating + whether any maintenance is scheduled to be performed in the next window. + }, + "metrics_endpoints": [ + { + "host": "str", # Optional. A FQDN pointing to the + database cluster's node(s). + "port": 0 # Optional. The port on which a service is + listening. + } + ], + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the database cluster will be assigned. If excluded, + the cluster when creating a new database cluster, it will be assigned to your + account's default VPC for the region. :code:`
`:code:`
`Requires + ``vpc:read`` scope. + "project_id": "str", # Optional. The ID of the project that the + database cluster is assigned to. If excluded when creating a new database + cluster, it will be assigned to your default + project.:code:`
`:code:`
`Requires ``project:read`` scope. + "rules": [ + { + "type": "str", # The type of resource that the + firewall rule allows to access the database cluster. Required. Known + values are: "droplet", "k8s", "ip_addr", "tag", and "app". + "value": "str", # The ID of the specific resource, + the name of a tag applied to a group of resources, or the IP address + that the firewall rule allows to access the database cluster. + Required. + "cluster_uuid": "str", # Optional. A unique ID for + the database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A + time value given in ISO8601 combined date and time format that + represents when the firewall rule was created. + "description": "str", # Optional. A human-readable + description of the rule. + "uuid": "str" # Optional. A unique ID for the + firewall rule itself. + } + ], + "schema_registry_connection": { + "host": "str", # Optional. The FQDN pointing to the schema + registry connection uri. + "password": "str", # Optional. The randomly generated + password for the schema registry.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the schema registry + is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience + and should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the schema + registry.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "semantic_version": "str", # Optional. A string representing the + semantic version of the database engine in use for the cluster. + "standby_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the + cluster, in MiB. If null, no additional storage is added to the cluster, + beyond what is provided as a base amount from the 'size' and any previously + added additional storage. + "tags": [ + "str" # Optional. An array of tags that have been applied to + the database cluster. :code:`
`:code:`
`Requires ``tag:read`` + scope. + ], + "ui_connection": { + "host": "str", # Optional. The FQDN pointing to the + opensearch cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the opensearch + dashboard is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience + and should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + opensearch dashboard.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "users": [ + { + "name": "str", # The name of a database user. + Required. + "access_cert": "str", # Optional. Access certificate + for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS + client authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying + the authentication method to be used for connections to the MySQL + user account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, + the default for the version of MySQL in use will be used. As of + MySQL 8.0, the default is ``caching_sha2_password``. Required. + Known values are: "mysql_native_password" and + "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated + password for the database user.:code:`
`Requires + ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the + database user's role. The value will be either "primary" or "normal". + Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # + Permission set applied to the ACL. 'consume' allows for + messages to be consumed from the topic. 'produce' allows + for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' + permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, + update). Required. Known values are: "admin", "consume", + "produce", and "produceconsume". + "topic": "str", # A regex + for matching the topic(s) that this ACL should apply to. + Required. + "id": "str" # Optional. An + identifier for the ACL. Will be computed after the ACL is + created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of + databases to which the user should have access. When the + database is set to ``admin``"" , the user will have + access to all databases based on the user's role i.e. a + user with the role ``readOnly`` assigned to the ``admin`` + database will have read access to all databases. + ], + "role": "str" # Optional. The role + to assign to the user with each role mapping to a MongoDB + built-in role. ``readOnly`` maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and + "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. + A regex for matching the indexes that this ACL should + apply to. + "permission": "str" # + Optional. Permission set applied to the ACL. 'read' + allows user to read from the index. 'write' allows for + user to write to the index. 'readwrite' allows for both + 'read' and 'write' permission. 
'deny'(default) restricts + user from performing any operation over an index. 'admin' + allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", + "read", "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For + Postgres clusters, set to ``true`` for a user with replication + rights. This option is not currently supported for other database + engines. + } + } + ], + "version": "str", # Optional. A string representing the version of + the database engine in use for the cluster. + "version_end_of_availability": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be available + for creating new clusters. If null, the version does not have an end of + availability timeline. + "version_end_of_life": "str" # Optional. A timestamp referring to + the date when the particular version will no longer be supported. If null, + the version does not have an end of life timeline. 
+ } } - # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -115808,415 +126302,6 @@ def purge_cache( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body - - _request = build_cdn_purge_cache_request( - cdn_id=cdn_id, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - deserialized = None - response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", 
response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - -class CertificatesOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~pydo.GeneratedClient`'s - :attr:`certificates` attribute. - """ - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = ( - input_args.pop(0) if input_args else kwargs.pop("deserializer") - ) - - @distributed_trace - def list( - self, *, per_page: int = 20, page: int = 1, name: str = "", **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """List All Certificates. - - To list all of the certificates available on your account, send a GET request to - ``/v2/certificates``. - - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int - :keyword name: Name of expected certificate. Default value is "". - :paramtype name: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "certificates": [ - { - "created_at": "2020-02-20 00:00:00", # Optional. 
A time - value given in ISO8601 combined date and time format that represents when - the certificate was created. - "dns_names": [ - "str" # Optional. An array of fully qualified domain - names (FQDNs) for which the certificate was issued. - ], - "id": "str", # Optional. A unique ID that can be used to - identify and reference a certificate. - "name": "str", # Optional. A unique human-readable name - referring to a certificate. - "not_after": "2020-02-20 00:00:00", # Optional. A time value - given in ISO8601 combined date and time format that represents the - certificate's expiration date. - "sha1_fingerprint": "str", # Optional. A unique identifier - generated from the SHA-1 fingerprint of the certificate. - "state": "str", # Optional. A string representing the - current state of the certificate. It may be ``pending``"" , - ``verified``"" , or ``error``. Known values are: "pending", "verified", - and "error". - "type": "str" # Optional. A string representing the type of - the certificate. The value will be ``custom`` for a user-uploaded - certificate or ``lets_encrypt`` for one automatically generated with - Let's Encrypt. Known values are: "custom" and "lets_encrypt". 
- } - ], - "links": { - "pages": {} - } - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_certificates_list_request( - per_page=per_page, - page=page, - name=name, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @overload - def create( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: - # pylint: 
disable=line-too-long - """Create a New Certificate. - - To upload new SSL certificate which you have previously generated, send a POST - request to ``/v2/certificates``. - - When uploading a user-generated certificate, the ``private_key``\\ , - ``leaf_certificate``\\ , and optionally the ``certificate_chain`` attributes should - be provided. The type must be set to ``custom``. - - When using Let's Encrypt to create a certificate, the ``dns_names`` attribute - must be provided, and the type must be set to ``lets_encrypt``. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = {} - - # response body for status code(s): 201 - response == { - "certificate": { - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the certificate - was created. - "dns_names": [ - "str" # Optional. An array of fully qualified domain names - (FQDNs) for which the certificate was issued. - ], - "id": "str", # Optional. A unique ID that can be used to identify - and reference a certificate. - "name": "str", # Optional. A unique human-readable name referring to - a certificate. - "not_after": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents the certificate's - expiration date. - "sha1_fingerprint": "str", # Optional. A unique identifier generated - from the SHA-1 fingerprint of the certificate. - "state": "str", # Optional. A string representing the current state - of the certificate. It may be ``pending``"" , ``verified``"" , or ``error``. - Known values are: "pending", "verified", and "error". 
- "type": "str" # Optional. A string representing the type of the - certificate. The value will be ``custom`` for a user-uploaded certificate or - ``lets_encrypt`` for one automatically generated with Let's Encrypt. Known - values are: "custom" and "lets_encrypt". - } - } - """ - - @overload - def create( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Create a New Certificate. - - To upload new SSL certificate which you have previously generated, send a POST - request to ``/v2/certificates``. - - When uploading a user-generated certificate, the ``private_key``\\ , - ``leaf_certificate``\\ , and optionally the ``certificate_chain`` attributes should - be provided. The type must be set to ``custom``. - - When using Let's Encrypt to create a certificate, the ``dns_names`` attribute - must be provided, and the type must be set to ``lets_encrypt``. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 201 - response == { - "certificate": { - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the certificate - was created. - "dns_names": [ - "str" # Optional. An array of fully qualified domain names - (FQDNs) for which the certificate was issued. - ], - "id": "str", # Optional. A unique ID that can be used to identify - and reference a certificate. - "name": "str", # Optional. A unique human-readable name referring to - a certificate. - "not_after": "2020-02-20 00:00:00", # Optional. 
A time value given - in ISO8601 combined date and time format that represents the certificate's - expiration date. - "sha1_fingerprint": "str", # Optional. A unique identifier generated - from the SHA-1 fingerprint of the certificate. - "state": "str", # Optional. A string representing the current state - of the certificate. It may be ``pending``"" , ``verified``"" , or ``error``. - Known values are: "pending", "verified", and "error". - "type": "str" # Optional. A string representing the type of the - certificate. The value will be ``custom`` for a user-uploaded certificate or - ``lets_encrypt`` for one automatically generated with Let's Encrypt. Known - values are: "custom" and "lets_encrypt". - } - } - """ - - @distributed_trace - def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: - # pylint: disable=line-too-long - """Create a New Certificate. - - To upload new SSL certificate which you have previously generated, send a POST - request to ``/v2/certificates``. - - When uploading a user-generated certificate, the ``private_key``\\ , - ``leaf_certificate``\\ , and optionally the ``certificate_chain`` attributes should - be provided. The type must be set to ``custom``. - - When using Let's Encrypt to create a certificate, the ``dns_names`` attribute - must be provided, and the type must be set to ``lets_encrypt``. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = {} - - # response body for status code(s): 201 - response == { - "certificate": { - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the certificate - was created. - "dns_names": [ - "str" # Optional. 
An array of fully qualified domain names - (FQDNs) for which the certificate was issued. - ], - "id": "str", # Optional. A unique ID that can be used to identify - and reference a certificate. - "name": "str", # Optional. A unique human-readable name referring to - a certificate. - "not_after": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents the certificate's - expiration date. - "sha1_fingerprint": "str", # Optional. A unique identifier generated - from the SHA-1 fingerprint of the certificate. - "state": "str", # Optional. A string representing the current state - of the certificate. It may be ``pending``"" , ``verified``"" , or ``error``. - Known values are: "pending", "verified", and "error". - "type": "str" # Optional. A string representing the type of the - certificate. The value will be ``custom`` for a user-uploaded certificate or - ``lets_encrypt`` for one automatically generated with Let's Encrypt. Known - values are: "custom" and "lets_encrypt". 
- } - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) @@ -116230,7 +126315,7 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: else: _json = body - _request = build_certificates_create_request( + _request = build_databases_create_cluster_request( content_type=content_type, json=_json, content=_content, @@ -116248,132 +126333,14 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [201]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @distributed_trace - def get(self, certificate_id: str, **kwargs: Any) -> JSON: - # pylint: 
disable=line-too-long - """Retrieve an Existing Certificate. - - To show information about an existing certificate, send a GET request to - ``/v2/certificates/$CERTIFICATE_ID``. - - :param certificate_id: A unique identifier for a certificate. Required. - :type certificate_id: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "certificate": { - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the certificate - was created. - "dns_names": [ - "str" # Optional. An array of fully qualified domain names - (FQDNs) for which the certificate was issued. - ], - "id": "str", # Optional. A unique ID that can be used to identify - and reference a certificate. - "name": "str", # Optional. A unique human-readable name referring to - a certificate. - "not_after": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents the certificate's - expiration date. - "sha1_fingerprint": "str", # Optional. A unique identifier generated - from the SHA-1 fingerprint of the certificate. - "state": "str", # Optional. A string representing the current state - of the certificate. It may be ``pending``"" , ``verified``"" , or ``error``. - Known values are: "pending", "verified", and "error". - "type": "str" # Optional. A string representing the type of the - certificate. The value will be ``custom`` for a user-uploaded certificate or - ``lets_encrypt`` for one automatically generated with Let's Encrypt. Known - values are: "custom" and "lets_encrypt". - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. 
- "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_certificates_get_request( - certificate_id=certificate_id, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: + if response.status_code not in [201, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -116411,136 +126378,26 @@ def get(self, certificate_id: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @distributed_trace - def delete(self, certificate_id: str, **kwargs: Any) -> Optional[JSON]: + def 
get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Delete a Certificate. - - To delete a specific certificate, send a DELETE request to - ``/v2/certificates/$CERTIFICATE_ID``. - - :param certificate_id: A unique identifier for a certificate. Required. - :type certificate_id: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - - _request = build_certificates_delete_request( - certificate_id=certificate_id, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - deserialized = None - response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", 
response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - -class BalanceOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. + """Retrieve an Existing Database Cluster. - Instead, you should access the following operations through - :class:`~pydo.GeneratedClient`'s - :attr:`balance` attribute. - """ + To show information about an existing database cluster, send a GET request to + ``/v2/databases/$DATABASE_ID``. - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = ( - input_args.pop(0) if input_args else kwargs.pop("deserializer") - ) + The response will be a JSON object with a database key. This will be set to an object + containing the standard database cluster attributes. - @distributed_trace - def get(self, **kwargs: Any) -> JSON: - # pylint: disable=line-too-long - """Get Customer Balance. + The embedded ``connection`` and ``private_connection`` objects will contain the information + needed to access the database cluster. For multi-node clusters, the ``standby_connection`` and + ``standby_private_connection`` objects contain the information needed to connect to the + cluster's standby node(s). - To retrieve the balances on a customer's account, send a GET request to - ``/v2/customers/my/balance``. + The embedded maintenance_window object will contain information about any scheduled maintenance + for the database cluster. + :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
+ :type database_cluster_uuid: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -116550,15 +126407,305 @@ def get(self, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "account_balance": "str", # Optional. Current balance of the customer's most - recent billing activity. Does not reflect ``month_to_date_usage``. - "generated_at": "2020-02-20 00:00:00", # Optional. The time at which - balances were most recently generated. - "month_to_date_balance": "str", # Optional. Balance as of the - ``generated_at`` time. This value includes the ``account_balance`` and - ``month_to_date_usage``. - "month_to_date_usage": "str" # Optional. Amount used in the current billing - period as of the ``generated_at`` time. + "database": { + "engine": "str", # A slug representing the database engine used for + the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, + "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" + for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", + "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch". + "name": "str", # A unique, human-readable name referring to a + database cluster. Required. + "num_nodes": 0, # The number of nodes in the database cluster. + Required. + "region": "str", # The slug identifier for the region where the + database cluster is located. Required. + "size": "str", # The slug identifier representing the size of the + nodes in the database cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "db_names": [ + "str" # Optional. An array of strings containing the names + of databases created in the database cluster. + ], + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the + database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database cluster. + "maintenance_window": { + "day": "str", # The day of the week on which to apply + maintenance updates. Required. + "hour": "str", # The hour in UTC at which maintenance + updates will be applied in 24 hour format. Required. + "description": [ + "str" # Optional. A list of strings, each containing + information about a pending maintenance update. + ], + "pending": bool # Optional. A boolean value indicating + whether any maintenance is scheduled to be performed in the next window. + }, + "metrics_endpoints": [ + { + "host": "str", # Optional. A FQDN pointing to the + database cluster's node(s). + "port": 0 # Optional. The port on which a service is + listening. + } + ], + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the database cluster will be assigned. If excluded, + the cluster when creating a new database cluster, it will be assigned to your + account's default VPC for the region. :code:`
`:code:`
`Requires + ``vpc:read`` scope. + "project_id": "str", # Optional. The ID of the project that the + database cluster is assigned to. If excluded when creating a new database + cluster, it will be assigned to your default + project.:code:`
`:code:`
`Requires ``project:read`` scope. + "rules": [ + { + "type": "str", # The type of resource that the + firewall rule allows to access the database cluster. Required. Known + values are: "droplet", "k8s", "ip_addr", "tag", and "app". + "value": "str", # The ID of the specific resource, + the name of a tag applied to a group of resources, or the IP address + that the firewall rule allows to access the database cluster. + Required. + "cluster_uuid": "str", # Optional. A unique ID for + the database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A + time value given in ISO8601 combined date and time format that + represents when the firewall rule was created. + "description": "str", # Optional. A human-readable + description of the rule. + "uuid": "str" # Optional. A unique ID for the + firewall rule itself. + } + ], + "schema_registry_connection": { + "host": "str", # Optional. The FQDN pointing to the schema + registry connection uri. + "password": "str", # Optional. The randomly generated + password for the schema registry.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the schema registry + is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience + and should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the schema + registry.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "semantic_version": "str", # Optional. A string representing the + semantic version of the database engine in use for the cluster. + "standby_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the + cluster, in MiB. If null, no additional storage is added to the cluster, + beyond what is provided as a base amount from the 'size' and any previously + added additional storage. + "tags": [ + "str" # Optional. An array of tags that have been applied to + the database cluster. :code:`
`:code:`
`Requires ``tag:read`` + scope. + ], + "ui_connection": { + "host": "str", # Optional. The FQDN pointing to the + opensearch cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the opensearch + dashboard is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. This is provided as a convenience + and should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + opensearch dashboard.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "users": [ + { + "name": "str", # The name of a database user. + Required. + "access_cert": "str", # Optional. Access certificate + for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS + client authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying + the authentication method to be used for connections to the MySQL + user account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, + the default for the version of MySQL in use will be used. As of + MySQL 8.0, the default is ``caching_sha2_password``. Required. + Known values are: "mysql_native_password" and + "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated + password for the database user.:code:`
`Requires + ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the + database user's role. The value will be either "primary" or "normal". + Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # + Permission set applied to the ACL. 'consume' allows for + messages to be consumed from the topic. 'produce' allows + for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' + permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, + update). Required. Known values are: "admin", "consume", + "produce", and "produceconsume". + "topic": "str", # A regex + for matching the topic(s) that this ACL should apply to. + Required. + "id": "str" # Optional. An + identifier for the ACL. Will be computed after the ACL is + created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of + databases to which the user should have access. When the + database is set to ``admin``"" , the user will have + access to all databases based on the user's role i.e. a + user with the role ``readOnly`` assigned to the ``admin`` + database will have read access to all databases. + ], + "role": "str" # Optional. The role + to assign to the user with each role mapping to a MongoDB + built-in role. ``readOnly`` maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and + "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. + A regex for matching the indexes that this ACL should + apply to. + "permission": "str" # + Optional. Permission set applied to the ACL. 'read' + allows user to read from the index. 'write' allows for + user to write to the index. 'readwrite' allows for both + 'read' and 'write' permission. 
'deny'(default) restricts + user from performing any operation over an index. 'admin' + allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", + "read", "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For + Postgres clusters, set to ``true`` for a user with replication + rights. This option is not currently supported for other database + engines. + } + } + ], + "version": "str", # Optional. A string representing the version of + the database engine in use for the cluster. + "version_end_of_availability": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be available + for creating new clusters. If null, the version does not have an end of + availability timeline. + "version_end_of_life": "str" # Optional. A timestamp referring to + the date when the particular version will no longer be supported. If null, + the version does not have an end of life timeline. + } } # response body for status code(s): 404 response == { @@ -116590,7 +126737,8 @@ def get(self, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_balance_get_request( + _request = build_databases_get_cluster_request( + database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, ) @@ -116649,68 +126797,26 @@ def get(self, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore - -class BillingHistoryOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~pydo.GeneratedClient`'s - :attr:`billing_history` attribute. 
- """ - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = ( - input_args.pop(0) if input_args else kwargs.pop("deserializer") - ) - @distributed_trace - def list(self, **kwargs: Any) -> JSON: + def destroy_cluster( + self, database_cluster_uuid: str, **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """List Billing History. + """Destroy a Database Cluster. - To retrieve a list of all billing history entries, send a GET request to - ``/v2/customers/my/billing_history``. + To destroy a specific database, send a DELETE request to ``/v2/databases/$DATABASE_ID``. + A status of 204 will be given. This indicates that the request was processed successfully, but + that no response body is needed. - :return: JSON object - :rtype: JSON + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "billing_history": [ - { - "amount": "str", # Optional. Amount of the billing history - entry. - "date": "2020-02-20 00:00:00", # Optional. Time the billing - history entry occurred. - "description": "str", # Optional. Description of the billing - history entry. - "invoice_id": "str", # Optional. ID of the invoice - associated with the billing history entry, if applicable. - "invoice_uuid": "str", # Optional. UUID of the invoice - associated with the billing history entry, if applicable. - "type": "str" # Optional. Type of billing history entry. 
- Known values are: "ACHFailure", "Adjustment", "AttemptFailed", - "Chargeback", "Credit", "CreditExpiration", "Invoice", "Payment", - "Refund", and "Reversal". - } - ], - "links": { - "pages": {} - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -116739,9 +126845,10 @@ def list(self, **kwargs: Any) -> JSON: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_billing_history_list_request( + _request = build_databases_destroy_cluster_request( + database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, ) @@ -116756,14 +126863,15 @@ def list(self, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -116774,11 +126882,6 @@ def list(self, **kwargs: Any) -> JSON: "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -116796,162 +126899,22 @@ def list(self, **kwargs: Any) -> JSON: deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - -class 
InvoicesOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~pydo.GeneratedClient`'s - :attr:`invoices` attribute. - """ - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = ( - input_args.pop(0) if input_args else kwargs.pop("deserializer") - ) - - @distributed_trace - def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: - # pylint: disable=line-too-long - """List All Invoices. - - To retrieve a list of all invoices, send a GET request to ``/v2/customers/my/invoices``. - - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "invoice_preview": { - "amount": "str", # Optional. Total amount of the invoice, in USD. - This will reflect month-to-date usage in the invoice preview. - "invoice_id": "str", # Optional. ID of the invoice. Listed on the - face of the invoice PDF as the "Invoice number". - "invoice_period": "str", # Optional. Billing period of usage for - which the invoice is issued, in ``YYYY-MM`` format. - "invoice_uuid": "str", # Optional. The UUID of the invoice. The - canonical reference for the invoice. - "updated_at": "str" # Optional. Time the invoice was last updated. - This is only included with the invoice preview. 
- }, - "invoices": [ - { - "amount": "str", # Optional. Total amount of the invoice, in - USD. This will reflect month-to-date usage in the invoice preview. - "invoice_id": "str", # Optional. ID of the invoice. Listed - on the face of the invoice PDF as the "Invoice number". - "invoice_period": "str", # Optional. Billing period of usage - for which the invoice is issued, in ``YYYY-MM`` format. - "invoice_uuid": "str", # Optional. The UUID of the invoice. - The canonical reference for the invoice. - "updated_at": "str" # Optional. Time the invoice was last - updated. This is only included with the invoice preview. - } - ], - "links": { - "pages": {} - } - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_invoices_list_request( - per_page=per_page, - page=page, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - 
response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @distributed_trace - def get_by_uuid( - self, invoice_uuid: str, *, per_page: int = 20, page: int = 1, **kwargs: Any - ) -> JSON: + def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Retrieve an Invoice by UUID. + """Retrieve an Existing Database Cluster Configuration. - To retrieve the invoice items for an invoice, send a GET request to - ``/v2/customers/my/invoices/$INVOICE_UUID``. + Shows configuration parameters for an existing database cluster by sending a GET request to + ``/v2/databases/$DATABASE_ID/config``. + The response is a JSON object with a ``config`` key, which is set to an object + containing any database configuration parameters. - :param invoice_uuid: UUID of the invoice. Required. - :type invoice_uuid: str - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -116961,39 +126924,7 @@ def get_by_uuid( # response body for status code(s): 200 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. 
- }, - "invoice_items": [ - { - "amount": "str", # Optional. Billed amount of this invoice - item. Billed in USD. - "description": "str", # Optional. Description of the invoice - item. - "duration": "str", # Optional. Duration of time this invoice - item was used and subsequently billed. - "duration_unit": "str", # Optional. Unit of time for - duration. - "end_time": "str", # Optional. Time the invoice item stopped - being billed for usage. - "group_description": "str", # Optional. Description of the - invoice item when it is a grouped set of usage, such as DOKS or - databases. - "product": "str", # Optional. Name of the product being - billed in the invoice item. - "project_name": "str", # Optional. Name of the DigitalOcean - Project this resource belongs to. - "resource_id": "str", # Optional. ID of the resource billing - in the invoice item if available. - "resource_uuid": "str", # Optional. UUID of the resource - billing in the invoice item if available. - "start_time": "str" # Optional. Time the invoice item began - to be billed for usage. - } - ], - "links": { - "pages": {} - } + "config": {} } # response body for status code(s): 404 response == { @@ -117025,10 +126956,8 @@ def get_by_uuid( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_invoices_get_by_uuid_request( - invoice_uuid=invoice_uuid, - per_page=per_page, - page=page, + _request = build_databases_get_config_request( + database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, ) @@ -117087,23 +127016,40 @@ def get_by_uuid( return cast(JSON, deserialized) # type: ignore - @distributed_trace - def get_csv_by_uuid(self, invoice_uuid: str, **kwargs: Any) -> Union[str, JSON]: + @overload + def patch_config( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Retrieve an Invoice CSV by UUID. 
+ """Update the Database Configuration for an Existing Database. - To retrieve a CSV for an invoice, send a GET request to - ``/v2/customers/my/invoices/$INVOICE_UUID/csv``. + To update the configuration for an existing database cluster, send a PATCH request to + ``/v2/databases/$DATABASE_ID/config``. - :param invoice_uuid: UUID of the invoice. Required. - :type invoice_uuid: str - :return: str or JSON object - :rtype: str or JSON + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "config": {} + } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -117116,101 +127062,31 @@ def get_csv_by_uuid(self, invoice_uuid: str, **kwargs: Any) -> Union[str, JSON]: tickets to help identify the issue. 
} """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[Union[str, JSON]] = kwargs.pop("cls", None) - - _request = build_invoices_get_csv_by_uuid_request( - invoice_uuid=invoice_uuid, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["content-disposition"] = self._deserialize( - "str", response.headers.get("content-disposition") - ) - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - 
response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(Union[str, JSON], deserialized), response_headers) # type: ignore - - return cast(Union[str, JSON], deserialized) # type: ignore - @distributed_trace - def get_pdf_by_uuid( - self, invoice_uuid: str, **kwargs: Any - ) -> Union[Iterator[bytes], JSON]: + @overload + def patch_config( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Retrieve an Invoice PDF by UUID. + """Update the Database Configuration for an Existing Database. - To retrieve a PDF for an invoice, send a GET request to - ``/v2/customers/my/invoices/$INVOICE_UUID/pdf``. + To update the configuration for an existing database cluster, send a PATCH request to + ``/v2/databases/$DATABASE_ID/config``. - :param invoice_uuid: UUID of the invoice. Required. - :type invoice_uuid: str - :return: Iterator[bytes] or JSON object - :rtype: Iterator[bytes] or JSON + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -117228,149 +127104,33 @@ def get_pdf_by_uuid( tickets to help identify the issue. 
} """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[Union[Iterator[bytes], JSON]] = kwargs.pop("cls", None) - - _request = build_invoices_get_pdf_by_uuid_request( - invoice_uuid=invoice_uuid, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = True - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["content-disposition"] = self._deserialize( - "str", response.headers.get("content-disposition") - ) - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - deserialized = response.iter_bytes() - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", 
response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - deserialized = response.iter_bytes() - - if cls: - return cls(pipeline_response, cast(Union[Iterator[bytes], JSON], deserialized), response_headers) # type: ignore - - return cast(Union[Iterator[bytes], JSON], deserialized) # type: ignore @distributed_trace - def get_summary_by_uuid(self, invoice_uuid: str, **kwargs: Any) -> JSON: + def patch_config( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Retrieve an Invoice Summary by UUID. + """Update the Database Configuration for an Existing Database. - To retrieve a summary for an invoice, send a GET request to - ``/v2/customers/my/invoices/$INVOICE_UUID/summary``. + To update the configuration for an existing database cluster, send a PATCH request to + ``/v2/databases/$DATABASE_ID/config``. - :param invoice_uuid: UUID of the invoice. Required. - :type invoice_uuid: str - :return: JSON object - :rtype: JSON + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "amount": "str", # Optional. Total amount of the invoice, in USD. This will - reflect month-to-date usage in the invoice preview. - "billing_period": "str", # Optional. Billing period of usage for which the - invoice is issued, in ``YYYY-MM`` format. - "credits_and_adjustments": { - "amount": "str", # Optional. Total amount charged in USD. - "name": "str" # Optional. Name of the charge. - }, - "invoice_id": "str", # Optional. ID of the invoice. 
- "invoice_uuid": "str", # Optional. UUID of the invoice. - "overages": { - "amount": "str", # Optional. Total amount charged in USD. - "name": "str" # Optional. Name of the charge. - }, - "product_charges": { - "amount": "str", # Optional. Total amount charged. - "items": [ - { - "amount": "str", # Optional. Amount of the charge. - "count": "str", # Optional. Number of times the - charge was applied. - "name": "str" # Optional. Description of the charge. - } - ], - "name": "str" # Optional. Description of usage charges. - }, - "taxes": { - "amount": "str", # Optional. Total amount charged in USD. - "name": "str" # Optional. Name of the charge. - }, - "user_billing_address": { - "address_line1": "str", # Optional. Street address line 1. - "address_line2": "str", # Optional. Street address line 2. - "city": "str", # Optional. City. - "country_iso2_code": "str", # Optional. Country (ISO2) code. - "created_at": "str", # Optional. Timestamp billing address was - created. - "postal_code": "str", # Optional. Postal code. - "region": "str", # Optional. Region. - "updated_at": "str" # Optional. Timestamp billing address was - updated. - }, - "user_company": "str", # Optional. Company of the DigitalOcean customer - being invoiced, if set. - "user_email": "str", # Optional. Email of the DigitalOcean customer being - invoiced. - "user_name": "str" # Optional. Name of the DigitalOcean customer being - invoiced. + # JSON input template you can fill out and use as your body input. 
+ body = { + "config": {} } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -117396,13 +127156,27 @@ def get_summary_by_uuid(self, invoice_uuid: str, **kwargs: Any) -> JSON: } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_invoices_get_summary_by_uuid_request( - invoice_uuid=invoice_uuid, + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_patch_config_request( + database_cluster_uuid=database_cluster_uuid, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -117423,6 +127197,7 @@ def get_summary_by_uuid(self, invoice_uuid: str, **kwargs: Any) -> JSON: map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( @@ -117435,11 +127210,6 @@ def get_summary_by_uuid(self, invoice_uuid: str, **kwargs: Any) -> JSON: "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -117457,62 +127227,24 @@ def get_summary_by_uuid(self, invoice_uuid: str, **kwargs: Any) -> JSON: deserialized = None if cls: - 
return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - -class BillingInsightsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~pydo.GeneratedClient`'s - :attr:`billing_insights` attribute. - """ + return cls(pipeline_response, deserialized, response_headers) # type: ignore - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = ( - input_args.pop(0) if input_args else kwargs.pop("deserializer") - ) + return deserialized # type: ignore @distributed_trace - def list( - self, - account_urn: str, - start_date: datetime.date, - end_date: datetime.date, - *, - per_page: int = 20, - page: int = 1, - **kwargs: Any, - ) -> JSON: + def get_ca(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List Billing Insights. + """Retrieve the Public Certificate. - This endpoint returns day-over-day changes in billing resource usage based on nightly invoice - items, including total amount, region, SKU, and description for a specified date range. It is - important to note that the daily resource usage may not reflect month-end billing totals when - totaled for a given month as nightly invoice item estimates do not necessarily encompass all - invoicing factors for the entire month. + To retrieve the public certificate used to secure the connection to the database cluster send a + GET request to + ``/v2/databases/$DATABASE_ID/ca``. - :param account_urn: URN of the customer account, can be a team (do:team:uuid) or an - organization (do:teamgroup:uuid). Required. 
- :type account_urn: str - :param start_date: Start date for billing insights in YYYY-MM-DD format. Required. - :type start_date: ~datetime.date - :param end_date: End date for billing insights in YYYY-MM-DD format. Must be within 31 days of - start_date. Required. - :type end_date: ~datetime.date - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int + The response will be a JSON object with a ``ca`` key. This will be set to an object + containing the base64 encoding of the public key certificate. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -117522,29 +127254,10 @@ def list( # response body for status code(s): 200 response == { - "current_page": 0, # Current page number. Required. - "data_points": [ - { - "description": "str", # Optional. Description of the billed - resource or service as shown on an invoice item. - "group_description": "str", # Optional. Optional invoice - item group name of the billed resource or service, blank when not part an - invoice item group. - "region": "str", # Optional. Region where the usage - occurred. - "sku": "str", # Optional. Unique SKU identifier for the - billed resource. - "start_date": "2020-02-20", # Optional. Start date of the - billing data point in YYYY-MM-DD format. - "total_amount": "str", # Optional. Total amount for this - data point in USD. - "usage_team_urn": "str" # Optional. URN of the team that - incurred the usage. - } - ], - "total_items": 0, # Total number of items available across all pages. - Required. - "total_pages": 0 # Total number of pages available. Required. + "ca": { + "certificate": "str" # base64 encoding of the certificate used to + secure database connections. Required. 
+ } } # response body for status code(s): 404 response == { @@ -117576,12 +127289,8 @@ def list( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_billing_insights_list_request( - account_urn=account_urn, - start_date=start_date, - end_date=end_date, - per_page=per_page, - page=page, + _request = build_databases_get_ca_request( + database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, ) @@ -117623,324 +127332,47 @@ def list( response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - -class DatabasesOperations: # pylint: disable=too-many-public-methods - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~pydo.GeneratedClient`'s - :attr:`databases` attribute. - """ - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = ( - input_args.pop(0) if input_args else kwargs.pop("deserializer") - ) - - @distributed_trace - def list_options(self, **kwargs: Any) -> JSON: - # pylint: disable=line-too-long - """List Database Options. - - To list all of the options available for the offered database engines, send a GET request to - ``/v2/databases/options``. 
- The result will be a JSON object with an ``options`` key. - - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "options": { - "kafka": { - "layouts": [ - { - "num_nodes": 0, # Optional. An array of - objects, each indicating the node sizes (otherwise referred to as - slugs) that are available with various numbers of nodes in the - database cluster. Each slugs denotes the node's identifier, CPU, - and RAM (in that order). - "sizes": [ - "str" # Optional. An array of - objects containing the slugs available with various node - counts. - ] - } - ], - "regions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ], - "versions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ] - }, - "mongodb": { - "layouts": [ - { - "num_nodes": 0, # Optional. An array of - objects, each indicating the node sizes (otherwise referred to as - slugs) that are available with various numbers of nodes in the - database cluster. Each slugs denotes the node's identifier, CPU, - and RAM (in that order). - "sizes": [ - "str" # Optional. An array of - objects containing the slugs available with various node - counts. - ] - } - ], - "regions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ], - "versions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ] - }, - "mysql": { - "layouts": [ - { - "num_nodes": 0, # Optional. An array of - objects, each indicating the node sizes (otherwise referred to as - slugs) that are available with various numbers of nodes in the - database cluster. Each slugs denotes the node's identifier, CPU, - and RAM (in that order). - "sizes": [ - "str" # Optional. An array of - objects containing the slugs available with various node - counts. 
- ] - } - ], - "regions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ], - "versions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ] - }, - "opensearch": { - "layouts": [ - { - "num_nodes": 0, # Optional. An array of - objects, each indicating the node sizes (otherwise referred to as - slugs) that are available with various numbers of nodes in the - database cluster. Each slugs denotes the node's identifier, CPU, - and RAM (in that order). - "sizes": [ - "str" # Optional. An array of - objects containing the slugs available with various node - counts. - ] - } - ], - "regions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ], - "versions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ] - }, - "pg": { - "layouts": [ - { - "num_nodes": 0, # Optional. An array of - objects, each indicating the node sizes (otherwise referred to as - slugs) that are available with various numbers of nodes in the - database cluster. Each slugs denotes the node's identifier, CPU, - and RAM (in that order). - "sizes": [ - "str" # Optional. An array of - objects containing the slugs available with various node - counts. - ] - } - ], - "regions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ], - "versions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ] - }, - "redis": { - "layouts": [ - { - "num_nodes": 0, # Optional. An array of - objects, each indicating the node sizes (otherwise referred to as - slugs) that are available with various numbers of nodes in the - database cluster. Each slugs denotes the node's identifier, CPU, - and RAM (in that order). - "sizes": [ - "str" # Optional. An array of - objects containing the slugs available with various node - counts. - ] - } - ], - "regions": [ - "str" # Optional. 
An array of strings containing the - names of available regions. - ], - "versions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ] - }, - "valkey": { - "layouts": [ - { - "num_nodes": 0, # Optional. An array of - objects, each indicating the node sizes (otherwise referred to as - slugs) that are available with various numbers of nodes in the - database cluster. Each slugs denotes the node's identifier, CPU, - and RAM (in that order). - "sizes": [ - "str" # Optional. An array of - objects containing the slugs available with various node - counts. - ] - } - ], - "regions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ], - "versions": [ - "str" # Optional. An array of strings containing the - names of available regions. - ] - } - }, - "version_availability": { - "kafka": [ - { - "end_of_availability": "str", # Optional. A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version - does not have an end of availability timeline. - "end_of_life": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life - timeline. - "version": "str" # Optional. The engine version. - } - ], - "mongodb": [ - { - "end_of_availability": "str", # Optional. A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version - does not have an end of availability timeline. - "end_of_life": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life - timeline. - "version": "str" # Optional. The engine version. - } - ], - "mysql": [ - { - "end_of_availability": "str", # Optional. 
A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version - does not have an end of availability timeline. - "end_of_life": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life - timeline. - "version": "str" # Optional. The engine version. - } - ], - "opensearch": [ - { - "end_of_availability": "str", # Optional. A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version - does not have an end of availability timeline. - "end_of_life": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life - timeline. - "version": "str" # Optional. The engine version. - } - ], - "pg": [ - { - "end_of_availability": "str", # Optional. A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version - does not have an end of availability timeline. - "end_of_life": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life - timeline. - "version": "str" # Optional. The engine version. - } - ], - "redis": [ - { - "end_of_availability": "str", # Optional. A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version - does not have an end of availability timeline. - "end_of_life": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life - timeline. - "version": "str" # Optional. The engine version. 
- } - ], - "valkey": [ - { - "end_of_availability": "str", # Optional. A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version - does not have an end of availability timeline. - "end_of_life": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life - timeline. - "version": "str" # Optional. The engine version. - } - ] - } + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def get_migration_status(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Retrieve the Status of an Online Migration. + + To retrieve the status of the most recent online migration, send a GET request to + ``/v2/databases/$DATABASE_ID/online-migration``. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "created_at": "str", # Optional. The time the migration was initiated, in + ISO 8601 format. + "id": "str", # Optional. The ID of the most recent migration. + "status": "str" # Optional. The current status of the migration. Known + values are: "running", "syncing", "canceled", "error", and "done". 
} # response body for status code(s): 404 response == { @@ -117972,7 +127404,8 @@ def list_options(self, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_options_request( + _request = build_databases_get_migration_status_request( + database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, ) @@ -118031,30 +127464,118 @@ def list_options(self, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore - @distributed_trace - def list_clusters(self, *, tag_name: Optional[str] = None, **kwargs: Any) -> JSON: + @overload + def update_online_migration( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: # pylint: disable=line-too-long - """List All Database Clusters. + """Start an Online Migration. - To list all of the database clusters available on your account, send a GET request to - ``/v2/databases``. To limit the results to database clusters with a specific tag, include the - ``tag_name`` query parameter set to the name of the tag. For example, - ``/v2/databases?tag_name=$TAG_NAME``. + To start an online migration, send a PUT request to + ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a + connection with an existing cluster and replicates its contents to the target cluster. Online + migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters. + If the existing database is continuously being written to, the migration process will continue + for up to two weeks unless it is manually stopped. Online migration is only available for + `MySQL + `_\\ + , `PostgreSQL + `_\\ , `Caching + `_\\ , and `Valkey + `_ clusters. - The result will be a JSON object with a ``databases`` key. This will be set to an array of - database objects, each of which will contain the standard database attributes. 
+ :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: - The embedded ``connection`` and ``private_connection`` objects will contain the information - needed to access the database cluster. For multi-node clusters, the ``standby_connection`` and - ``standby_private_connection`` objects will contain the information needed to connect to the - cluster's standby node(s). + Example: + .. code-block:: python - The embedded ``maintenance_window`` object will contain information about any scheduled - maintenance for the database cluster. + # JSON input template you can fill out and use as your body input. + body = { + "source": { + "dbname": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user. + "port": 0, # Optional. The port on which the database cluster is + listening. + "username": "str" # Optional. The default user for the database. + }, + "disable_ssl": bool, # Optional. Enables SSL encryption when connecting to + the source database. + "ignore_dbs": [ + "str" # Optional. List of databases that should be ignored during + migration. + ] + } - :keyword tag_name: Limits the results to database clusters with a specific - tag.:code:`
`:code:`
`Requires ``tag:read`` scope. Default value is None. - :paramtype tag_name: str + # response body for status code(s): 200 + response == { + "created_at": "str", # Optional. The time the migration was initiated, in + ISO 8601 format. + "id": "str", # Optional. The ID of the most recent migration. + "status": "str" # Optional. The current status of the migration. Known + values are: "running", "syncing", "canceled", "error", and "done". + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def update_online_migration( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Start an Online Migration. + + To start an online migration, send a PUT request to + ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a + connection with an existing cluster and replicates its contents to the target cluster. Online + migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters. + If the existing database is continuously being written to, the migration process will continue + for up to two weeks unless it is manually stopped. Online migration is only available for + `MySQL + `_\\ + , `PostgreSQL + `_\\ , `Caching + `_\\ , and `Valkey + `_ clusters. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
+ :type database_cluster_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -118064,326 +127585,84 @@ def list_clusters(self, *, tag_name: Optional[str] = None, **kwargs: Any) -> JSO # response body for status code(s): 200 response == { - "databases": [ - { - "engine": "str", # A slug representing the database engine - used for the cluster. The possible values are: "pg" for PostgreSQL, - "mysql" for MySQL, "redis" for Caching, "mongodb" for MongoDB, "kafka" - for Kafka, "opensearch" for OpenSearch, and "valkey" for Valkey. - Required. Known values are: "pg", "mysql", "redis", "valkey", "mongodb", - "kafka", and "opensearch". - "name": "str", # A unique, human-readable name referring to - a database cluster. Required. - "num_nodes": 0, # The number of nodes in the database - cluster. Required. - "region": "str", # The slug identifier for the region where - the database cluster is located. Required. - "size": "str", # The slug identifier representing the size - of the nodes in the database cluster. Required. - "connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the database cluster was created. - "db_names": [ - "str" # Optional. An array of strings containing the - names of databases created in the database cluster. - ], - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs - for the database cluster. Each CNAME must be a valid RFC 1123 - hostname (e.g., "db.example.com"). Maximum of 16 CNAMEs allowed, - each up to 253 characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to - identify and reference a database cluster. - "maintenance_window": { - "day": "str", # The day of the week on which to - apply maintenance updates. Required. - "hour": "str", # The hour in UTC at which - maintenance updates will be applied in 24 hour format. Required. - "description": [ - "str" # Optional. A list of strings, each - containing information about a pending maintenance update. - ], - "pending": bool # Optional. A boolean value - indicating whether any maintenance is scheduled to be performed in - the next window. - }, - "metrics_endpoints": [ - { - "host": "str", # Optional. A FQDN pointing - to the database cluster's node(s). - "port": 0 # Optional. The port on which a - service is listening. - } - ], - "private_connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "private_network_uuid": "str", # Optional. A string - specifying the UUID of the VPC to which the database cluster will be - assigned. If excluded, the cluster when creating a new database cluster, - it will be assigned to your account's default VPC for the region. - :code:`
`:code:`
`Requires ``vpc:read`` scope. - "project_id": "str", # Optional. The ID of the project that - the database cluster is assigned to. If excluded when creating a new - database cluster, it will be assigned to your default - project.:code:`
`:code:`
`Requires ``project:read`` scope. - "rules": [ - { - "type": "str", # The type of resource that - the firewall rule allows to access the database cluster. - Required. Known values are: "droplet", "k8s", "ip_addr", "tag", - and "app". - "value": "str", # The ID of the specific - resource, the name of a tag applied to a group of resources, or - the IP address that the firewall rule allows to access the - database cluster. Required. - "cluster_uuid": "str", # Optional. A unique - ID for the database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # - Optional. A time value given in ISO8601 combined date and time - format that represents when the firewall rule was created. - "description": "str", # Optional. A - human-readable description of the rule. - "uuid": "str" # Optional. A unique ID for - the firewall rule itself. - } - ], - "schema_registry_connection": { - "host": "str", # Optional. The FQDN pointing to the - schema registry connection uri. - "password": "str", # Optional. The randomly - generated password for the schema - registry.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the schema - registry is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - schema registry.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "semantic_version": "str", # Optional. A string representing - the semantic version of the database engine in use for the cluster. - "standby_connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "status": "str", # Optional. A string representing the - current status of the database cluster. Known values are: "creating", - "online", "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added - to the cluster, in MiB. If null, no additional storage is added to the - cluster, beyond what is provided as a base amount from the 'size' and any - previously added additional storage. - "tags": [ - "str" # Optional. An array of tags that have been - applied to the database cluster. :code:`
`:code:`
`Requires - ``tag:read`` scope. - ], - "ui_connection": { - "host": "str", # Optional. The FQDN pointing to the - opensearch cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - opensearch dashboard is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - opensearch dashboard.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "users": [ - { - "name": "str", # The name of a database - user. Required. - "access_cert": "str", # Optional. Access - certificate for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key - for TLS client authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string - specifying the authentication method to be used for - connections to the MySQL user account. The valid values are - ``mysql_native_password`` or ``caching_sha2_password``. If - excluded when creating a new user, the default for the - version of MySQL in use will be used. As of MySQL 8.0, the - default is ``caching_sha2_password``. Required. Known values - are: "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly - generated password for the database user.:code:`
`Requires - ``database:view_credentials`` scope. - "role": "str", # Optional. A string - representing the database user's role. The value will be either - "primary" or "normal". Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", - # Permission set applied to the ACL. 'consume' allows - for messages to be consumed from the topic. 'produce' - allows for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for - 'produceconsume' as well as any operations to - administer the topic (delete, update). Required. - Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A - regex for matching the topic(s) that this ACL should - apply to. Required. - "id": "str" # - Optional. An identifier for the ACL. Will be computed - after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A - list of databases to which the user should have - access. When the database is set to ``admin``"" , the - user will have access to all databases based on the - user's role i.e. a user with the role ``readOnly`` - assigned to the ``admin`` database will have read - access to all databases. - ], - "role": "str" # Optional. - The role to assign to the user with each role mapping to - a MongoDB built-in role. ``readOnly`` maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and - "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # - Optional. A regex for matching the indexes that this - ACL should apply to. - "permission": "str" - # Optional. Permission set applied to the ACL. 'read' - allows user to read from the index. 'write' allows - for user to write to the index. 'readwrite' allows - for both 'read' and 'write' permission. 
- 'deny'(default) restricts user from performing any - operation over an index. 'admin' allows for - 'readwrite' as well as any operations to administer - the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # - Optional. For Postgres clusters, set to ``true`` for a user - with replication rights. This option is not currently - supported for other database engines. - } - } - ], - "version": "str", # Optional. A string representing the - version of the database engine in use for the cluster. - "version_end_of_availability": "str", # Optional. A - timestamp referring to the date when the particular version will no - longer be available for creating new clusters. If null, the version does - not have an end of availability timeline. - "version_end_of_life": "str" # Optional. A timestamp - referring to the date when the particular version will no longer be - supported. If null, the version does not have an end of life timeline. - } + "created_at": "str", # Optional. The time the migration was initiated, in + ISO 8601 format. + "id": "str", # Optional. The ID of the most recent migration. + "status": "str" # Optional. The current status of the migration. Known + values are: "running", "syncing", "canceled", "error", and "done". + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @distributed_trace + def update_online_migration( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Start an Online Migration. + + To start an online migration, send a PUT request to + ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a + connection with an existing cluster and replicates its contents to the target cluster. Online + migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters. + If the existing database is continuously being written to, the migration process will continue + for up to two weeks unless it is manually stopped. Online migration is only available for + `MySQL + `_\\ + , `PostgreSQL + `_\\ , `Caching + `_\\ , and `Valkey + `_ clusters. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "source": { + "dbname": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user. + "port": 0, # Optional. The port on which the database cluster is + listening. + "username": "str" # Optional. The default user for the database. + }, + "disable_ssl": bool, # Optional. Enables SSL encryption when connecting to + the source database. + "ignore_dbs": [ + "str" # Optional. List of databases that should be ignored during + migration. ] } + + # response body for status code(s): 200 + response == { + "created_at": "str", # Optional. 
The time the migration was initiated, in + ISO 8601 format. + "id": "str", # Optional. The ID of the most recent migration. + "status": "str" # Optional. The current status of the migration. Known + values are: "running", "syncing", "canceled", "error", and "done". + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -118409,13 +127688,27 @@ def list_clusters(self, *, tag_name: Optional[str] = None, **kwargs: Any) -> JSO } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_clusters_request( - tag_name=tag_name, + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_update_online_migration_request( + database_cluster_uuid=database_cluster_uuid, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -118474,657 +127767,252 @@ def list_clusters(self, *, tag_name: Optional[str] = None, **kwargs: Any) -> JSO return cast(JSON, deserialized) # type: ignore - @overload - def create_cluster( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + @distributed_trace + def delete_online_migration( + self, database_cluster_uuid: str, migration_id: str, **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Create a New Database Cluster. - - To create a database cluster, send a POST request to ``/v2/databases``. 
To see a list of - options for each engine, such as available regions, size slugs, and versions, send a GET - request to the ``/v2/databases/options`` endpoint. The available sizes for the - ``storage_size_mib`` field depends on the cluster's size. To see a list of available sizes, see - `Managed Database Pricing `_. - - The create response returns a JSON object with a key called ``database``. The value of this is - an object that contains the standard attributes associated with a database cluster. The initial - value of the database cluster's ``status`` attribute is ``creating``. When the cluster is ready - to receive traffic, this changes to ``online``. + """Stop an Online Migration. - The embedded ``connection`` and ``private_connection`` objects contains the information needed - to access the database cluster. For multi-node clusters, the ``standby_connection`` and - ``standby_private_connection`` objects contain the information needed to connect to the - cluster's standby node(s). + To stop an online migration, send a DELETE request to + ``/v2/databases/$DATABASE_ID/online-migration/$MIGRATION_ID``. - DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To - create a new database cluster based on a backup of an existing cluster, send a POST request to - ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must - include a key named ``backup_restore`` with the name of the original database cluster and the - timestamp of the backup to be restored. Creating a database from a backup is the same as - forking a database in the control panel. - Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. Backups are - also not supported for Caching or Valkey clusters. + A status of 204 will be given. This indicates that the request was processed successfully, but + that no response body is needed. - :param body: Required. 
- :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param migration_id: A unique identifier assigned to the online migration. Required. + :type migration_id: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "engine": "str", # A slug representing the database engine used for the - cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, "redis" - for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" for - OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", "mysql", - "redis", "valkey", "mongodb", "kafka", and "opensearch". - "name": "str", # A unique, human-readable name referring to a database - cluster. Required. - "num_nodes": 0, # The number of nodes in the database cluster. Required. - "region": "str", # The slug identifier for the region where the database - cluster is located. Required. - "size": "str", # The slug identifier representing the size of the nodes in - the database cluster. Required. - "autoscale": { - "storage": { - "enabled": bool, # Whether storage autoscaling is enabled - for the cluster. Required. - "increment_gib": 0, # Optional. The amount of additional - storage to add (in GiB) when autoscaling is triggered. - "threshold_percent": 0 # Optional. The storage usage - threshold percentage that triggers autoscaling. When storage usage - exceeds this percentage, additional storage will be added automatically. 
- } - }, - "backup_restore": { - "database_name": "str", # The name of an existing database cluster - from which the backup will be restored. Required. - "backup_created_at": "2020-02-20 00:00:00" # Optional. The timestamp - of an existing database cluster backup in ISO8601 combined date and time - format. The most recent backup will be used if excluded. - }, - "connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given in - ISO8601 combined date and time format that represents when the database cluster - was created. - "db_names": [ - "str" # Optional. An array of strings containing the names of - databases created in the database cluster. - ], - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the database - cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify and - reference a database cluster. - "maintenance_window": { - "day": "str", # The day of the week on which to apply maintenance - updates. Required. - "hour": "str", # The hour in UTC at which maintenance updates will - be applied in 24 hour format. Required. - "description": [ - "str" # Optional. A list of strings, each containing - information about a pending maintenance update. - ], - "pending": bool # Optional. A boolean value indicating whether any - maintenance is scheduled to be performed in the next window. - }, - "metrics_endpoints": [ - { - "host": "str", # Optional. A FQDN pointing to the database - cluster's node(s). - "port": 0 # Optional. The port on which a service is - listening. - } - ], - "private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_network_uuid": "str", # Optional. A string specifying the UUID of - the VPC to which the database cluster will be assigned. If excluded, the cluster - when creating a new database cluster, it will be assigned to your account's - default VPC for the region. :code:`
`:code:`
`Requires ``vpc:read`` scope. - "project_id": "str", # Optional. The ID of the project that the database - cluster is assigned to. If excluded when creating a new database cluster, it will - be assigned to your default project.:code:`
`:code:`
`Requires - ``project:update`` scope. - "rules": [ - { - "type": "str", # The type of resource that the firewall rule - allows to access the database cluster. Required. Known values are: - "droplet", "k8s", "ip_addr", "tag", and "app". - "value": "str", # The ID of the specific resource, the name - of a tag applied to a group of resources, or the IP address that the - firewall rule allows to access the database cluster. Required. - "cluster_uuid": "str", # Optional. A unique ID for the - database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the firewall rule was created. - "description": "str", # Optional. A human-readable - description of the rule. - "uuid": "str" # Optional. A unique ID for the firewall rule - itself. - } - ], - "schema_registry_connection": { - "host": "str", # Optional. The FQDN pointing to the schema registry - connection uri. - "password": "str", # Optional. The randomly generated password for - the schema registry.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the schema registry is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the schema - registry.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "semantic_version": "str", # Optional. A string representing the semantic - version of the database engine in use for the cluster. - "standby_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "status": "str", # Optional. A string representing the current status of the - database cluster. Known values are: "creating", "online", "resizing", - "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the cluster, - in MiB. If null, no additional storage is added to the cluster, beyond what is - provided as a base amount from the 'size' and any previously added additional - storage. - "tags": [ - "str" # Optional. An array of tags (as strings) to apply to the - database cluster. :code:`
`:code:`
`Requires ``tag:create`` scope. - ], - "ui_connection": { - "host": "str", # Optional. The FQDN pointing to the opensearch - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the opensearch dashboard is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the opensearch - dashboard.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "users": [ - { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS - client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL - 8.0, the default is ``caching_sha2_password``. Required. Known values - are: "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password - for the database user.:code:`
`Requires ``database:view_credentials`` - scope. - "role": "str", # Optional. A string representing the - database user's role. The value will be either "primary" or "normal". - Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission - set applied to the ACL. 'consume' allows for messages to be - consumed from the topic. 'produce' allows for messages to be - published to the topic. 'produceconsume' allows for both - 'consume' and 'produce' permission. 'admin' allows for - 'produceconsume' as well as any operations to administer the - topic (delete, update). Required. Known values are: "admin", - "consume", "produce", and "produceconsume". - "topic": "str", # A regex for - matching the topic(s) that this ACL should apply to. - Required. - "id": "str" # Optional. An - identifier for the ACL. Will be computed after the ACL is - created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of - databases to which the user should have access. When the - database is set to ``admin``"" , the user will have access to - all databases based on the user's role i.e. a user with the - role ``readOnly`` assigned to the ``admin`` database will - have read access to all databases. - ], - "role": "str" # Optional. The role to assign - to the user with each role mapping to a MongoDB built-in role. - ``readOnly`` maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex - for matching the indexes that this ACL should apply to. - "permission": "str" # Optional. - Permission set applied to the ACL. 'read' allows user to read - from the index. 'write' allows for user to write to the - index. 'readwrite' allows for both 'read' and 'write' - permission. 
'deny'(default) restricts user from performing - any operation over an index. 'admin' allows for 'readwrite' - as well as any operations to administer the index. Known - values are: "deny", "admin", "read", "readwrite", and - "write". - } - ], - "pg_allow_replication": bool # Optional. For - Postgres clusters, set to ``true`` for a user with replication - rights. This option is not currently supported for other database - engines. - } - } - ], - "version": "str", # Optional. A string representing the version of the - database engine in use for the cluster. - "version_end_of_availability": "str", # Optional. A timestamp referring to - the date when the particular version will no longer be available for creating new - clusters. If null, the version does not have an end of availability timeline. - "version_end_of_life": "str" # Optional. A timestamp referring to the date - when the particular version will no longer be supported. If null, the version - does not have an end of life timeline. + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_databases_delete_online_migration_request( + database_cluster_uuid=database_cluster_uuid, + migration_id=migration_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = 
self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def update_region( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Migrate a Database Cluster to a New Region. + + To migrate a database cluster to a new region, send a ``PUT`` request to + ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a + ``region`` attribute. + + A successful request will receive a 202 Accepted status code with no body in + response. Querying the database cluster will show that its ``status`` attribute + will now be set to ``migrating``. This will transition back to ``online`` when the + migration has completed. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "region": "str" # A slug identifier for the region to which the database + cluster will be migrated. Required. } - # response body for status code(s): 201 + # response body for status code(s): 404 response == { - "database": { - "engine": "str", # A slug representing the database engine used for - the cluster. 
The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" - for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", - "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch". - "name": "str", # A unique, human-readable name referring to a - database cluster. Required. - "num_nodes": 0, # The number of nodes in the database cluster. - Required. - "region": "str", # The slug identifier for the region where the - database cluster is located. Required. - "size": "str", # The slug identifier representing the size of the - nodes in the database cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "db_names": [ - "str" # Optional. An array of strings containing the names - of databases created in the database cluster. - ], - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the - database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database cluster. - "maintenance_window": { - "day": "str", # The day of the week on which to apply - maintenance updates. Required. - "hour": "str", # The hour in UTC at which maintenance - updates will be applied in 24 hour format. Required. - "description": [ - "str" # Optional. A list of strings, each containing - information about a pending maintenance update. - ], - "pending": bool # Optional. A boolean value indicating - whether any maintenance is scheduled to be performed in the next window. - }, - "metrics_endpoints": [ - { - "host": "str", # Optional. A FQDN pointing to the - database cluster's node(s). - "port": 0 # Optional. The port on which a service is - listening. - } - ], - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the database cluster will be assigned. If excluded, - the cluster when creating a new database cluster, it will be assigned to your - account's default VPC for the region. :code:`
`:code:`
`Requires - ``vpc:read`` scope. - "project_id": "str", # Optional. The ID of the project that the - database cluster is assigned to. If excluded when creating a new database - cluster, it will be assigned to your default - project.:code:`
`:code:`
`Requires ``project:read`` scope. - "rules": [ - { - "type": "str", # The type of resource that the - firewall rule allows to access the database cluster. Required. Known - values are: "droplet", "k8s", "ip_addr", "tag", and "app". - "value": "str", # The ID of the specific resource, - the name of a tag applied to a group of resources, or the IP address - that the firewall rule allows to access the database cluster. - Required. - "cluster_uuid": "str", # Optional. A unique ID for - the database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A - time value given in ISO8601 combined date and time format that - represents when the firewall rule was created. - "description": "str", # Optional. A human-readable - description of the rule. - "uuid": "str" # Optional. A unique ID for the - firewall rule itself. - } - ], - "schema_registry_connection": { - "host": "str", # Optional. The FQDN pointing to the schema - registry connection uri. - "password": "str", # Optional. The randomly generated - password for the schema registry.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the schema registry - is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience - and should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the schema - registry.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "semantic_version": "str", # Optional. A string representing the - semantic version of the database engine in use for the cluster. - "standby_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the - cluster, in MiB. If null, no additional storage is added to the cluster, - beyond what is provided as a base amount from the 'size' and any previously - added additional storage. - "tags": [ - "str" # Optional. An array of tags that have been applied to - the database cluster. :code:`
`:code:`
`Requires ``tag:read`` - scope. - ], - "ui_connection": { - "host": "str", # Optional. The FQDN pointing to the - opensearch cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the opensearch - dashboard is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience - and should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - opensearch dashboard.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "users": [ - { - "name": "str", # The name of a database user. - Required. - "access_cert": "str", # Optional. Access certificate - for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS - client authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying - the authentication method to be used for connections to the MySQL - user account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, - the default for the version of MySQL in use will be used. As of - MySQL 8.0, the default is ``caching_sha2_password``. Required. - Known values are: "mysql_native_password" and - "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated - password for the database user.:code:`
`Requires - ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the - database user's role. The value will be either "primary" or "normal". - Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # - Permission set applied to the ACL. 'consume' allows for - messages to be consumed from the topic. 'produce' allows - for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' - permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, - update). Required. Known values are: "admin", "consume", - "produce", and "produceconsume". - "topic": "str", # A regex - for matching the topic(s) that this ACL should apply to. - Required. - "id": "str" # Optional. An - identifier for the ACL. Will be computed after the ACL is - created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of - databases to which the user should have access. When the - database is set to ``admin``"" , the user will have - access to all databases based on the user's role i.e. a - user with the role ``readOnly`` assigned to the ``admin`` - database will have read access to all databases. - ], - "role": "str" # Optional. The role - to assign to the user with each role mapping to a MongoDB - built-in role. ``readOnly`` maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and - "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. - A regex for matching the indexes that this ACL should - apply to. - "permission": "str" # - Optional. Permission set applied to the ACL. 'read' - allows user to read from the index. 'write' allows for - user to write to the index. 'readwrite' allows for both - 'read' and 'write' permission. 
'deny'(default) restricts - user from performing any operation over an index. 'admin' - allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", - "read", "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For - Postgres clusters, set to ``true`` for a user with replication - rights. This option is not currently supported for other database - engines. - } - } - ], - "version": "str", # Optional. A string representing the version of - the database engine in use for the cluster. - "version_end_of_availability": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be available - for creating new clusters. If null, the version does not have an end of - availability timeline. - "version_end_of_life": "str" # Optional. A timestamp referring to - the date when the particular version will no longer be supported. If null, - the version does not have an end of life timeline. - } + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def update_region( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Migrate a Database Cluster to a New Region. + + To migrate a database cluster to a new region, send a ``PUT`` request to + ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a + ``region`` attribute. 
+ + A successful request will receive a 202 Accepted status code with no body in + response. Querying the database cluster will show that its ``status`` attribute + will now be set to ``migrating``. This will transition back to ``online`` when the + migration has completed. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def update_region( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Migrate a Database Cluster to a New Region. + + To migrate a database cluster to a new region, send a ``PUT`` request to + ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a + ``region`` attribute. + + A successful request will receive a 202 Accepted status code with no body in + response. Querying the database cluster will show that its ``status`` attribute + will now be set to ``migrating``. 
This will transition back to ``online`` when the + migration has completed. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "region": "str" # A slug identifier for the region to which the database + cluster will be migrated. Required. } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -119137,352 +128025,373 @@ def create_cluster( tickets to help identify the issue. } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_update_region_request( + database_cluster_uuid=database_cluster_uuid, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + 
self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 202: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore @overload - def create_cluster( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + def update_cluster_size( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Create a New Database Cluster. + """Resize a Database Cluster. - To create a database cluster, send a POST request to ``/v2/databases``. 
To see a list of - options for each engine, such as available regions, size slugs, and versions, send a GET - request to the ``/v2/databases/options`` endpoint. The available sizes for the - ``storage_size_mib`` field depends on the cluster's size. To see a list of available sizes, see - `Managed Database Pricing `_. + To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The + body of the request must specify both the size and num_nodes attributes. + A successful request will receive a 202 Accepted status code with no body in response. Querying + the database cluster will show that its status attribute will now be set to resizing. This will + transition back to online when the resize operation has completed. - The create response returns a JSON object with a key called ``database``. The value of this is - an object that contains the standard attributes associated with a database cluster. The initial - value of the database cluster's ``status`` attribute is ``creating``. When the cluster is ready - to receive traffic, this changes to ``online``. + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: - The embedded ``connection`` and ``private_connection`` objects contains the information needed - to access the database cluster. For multi-node clusters, the ``standby_connection`` and - ``standby_private_connection`` objects contain the information needed to connect to the - cluster's standby node(s). + Example: + .. code-block:: python - DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. 
To - create a new database cluster based on a backup of an existing cluster, send a POST request to - ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must - include a key named ``backup_restore`` with the name of the original database cluster and the - timestamp of the backup to be restored. Creating a database from a backup is the same as - forking a database in the control panel. - Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. Backups are - also not supported for Caching or Valkey clusters. + # JSON input template you can fill out and use as your body input. + body = { + "num_nodes": 0, # The number of nodes in the database cluster. Valid values + are are 1-3. In addition to the primary node, up to two standby nodes may be + added for highly available configurations. Required. + "size": "str", # A slug identifier representing desired the size of the + nodes in the database cluster. Required. + "storage_size_mib": 0 # Optional. Additional storage added to the cluster, + in MiB. If null, no additional storage is added to the cluster, beyond what is + provided as a base amount from the 'size' and any previously added additional + storage. + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @overload + def update_cluster_size( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Resize a Database Cluster. + + To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The + body of the request must specify both the size and num_nodes attributes. + A successful request will receive a 202 Accepted status code with no body in response. Querying + the database cluster will show that its status attribute will now be set to resizing. This will + transition back to online when the resize operation has completed. + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 404 response == { - "database": { - "engine": "str", # A slug representing the database engine used for - the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" - for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", - "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch". - "name": "str", # A unique, human-readable name referring to a - database cluster. Required. - "num_nodes": 0, # The number of nodes in the database cluster. - Required. - "region": "str", # The slug identifier for the region where the - database cluster is located. Required. 
- "size": "str", # The slug identifier representing the size of the - nodes in the database cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "db_names": [ - "str" # Optional. An array of strings containing the names - of databases created in the database cluster. - ], - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the - database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database cluster. - "maintenance_window": { - "day": "str", # The day of the week on which to apply - maintenance updates. Required. - "hour": "str", # The hour in UTC at which maintenance - updates will be applied in 24 hour format. Required. - "description": [ - "str" # Optional. A list of strings, each containing - information about a pending maintenance update. - ], - "pending": bool # Optional. A boolean value indicating - whether any maintenance is scheduled to be performed in the next window. - }, - "metrics_endpoints": [ - { - "host": "str", # Optional. A FQDN pointing to the - database cluster's node(s). - "port": 0 # Optional. The port on which a service is - listening. - } - ], - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the database cluster will be assigned. If excluded, - the cluster when creating a new database cluster, it will be assigned to your - account's default VPC for the region. :code:`
`:code:`
`Requires - ``vpc:read`` scope. - "project_id": "str", # Optional. The ID of the project that the - database cluster is assigned to. If excluded when creating a new database - cluster, it will be assigned to your default - project.:code:`
`:code:`
`Requires ``project:read`` scope. - "rules": [ - { - "type": "str", # The type of resource that the - firewall rule allows to access the database cluster. Required. Known - values are: "droplet", "k8s", "ip_addr", "tag", and "app". - "value": "str", # The ID of the specific resource, - the name of a tag applied to a group of resources, or the IP address - that the firewall rule allows to access the database cluster. - Required. - "cluster_uuid": "str", # Optional. A unique ID for - the database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A - time value given in ISO8601 combined date and time format that - represents when the firewall rule was created. - "description": "str", # Optional. A human-readable - description of the rule. - "uuid": "str" # Optional. A unique ID for the - firewall rule itself. - } - ], - "schema_registry_connection": { - "host": "str", # Optional. The FQDN pointing to the schema - registry connection uri. - "password": "str", # Optional. The randomly generated - password for the schema registry.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the schema registry - is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience - and should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the schema - registry.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "semantic_version": "str", # Optional. A string representing the - semantic version of the database engine in use for the cluster. - "standby_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the - cluster, in MiB. If null, no additional storage is added to the cluster, - beyond what is provided as a base amount from the 'size' and any previously - added additional storage. - "tags": [ - "str" # Optional. An array of tags that have been applied to - the database cluster. :code:`
`:code:`
`Requires ``tag:read`` - scope. - ], - "ui_connection": { - "host": "str", # Optional. The FQDN pointing to the - opensearch cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the opensearch - dashboard is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience - and should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - opensearch dashboard.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "users": [ - { - "name": "str", # The name of a database user. - Required. - "access_cert": "str", # Optional. Access certificate - for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS - client authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying - the authentication method to be used for connections to the MySQL - user account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, - the default for the version of MySQL in use will be used. As of - MySQL 8.0, the default is ``caching_sha2_password``. Required. - Known values are: "mysql_native_password" and - "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated - password for the database user.:code:`
`Requires - ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the - database user's role. The value will be either "primary" or "normal". - Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # - Permission set applied to the ACL. 'consume' allows for - messages to be consumed from the topic. 'produce' allows - for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' - permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, - update). Required. Known values are: "admin", "consume", - "produce", and "produceconsume". - "topic": "str", # A regex - for matching the topic(s) that this ACL should apply to. - Required. - "id": "str" # Optional. An - identifier for the ACL. Will be computed after the ACL is - created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of - databases to which the user should have access. When the - database is set to ``admin``"" , the user will have - access to all databases based on the user's role i.e. a - user with the role ``readOnly`` assigned to the ``admin`` - database will have read access to all databases. - ], - "role": "str" # Optional. The role - to assign to the user with each role mapping to a MongoDB - built-in role. ``readOnly`` maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and - "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. - A regex for matching the indexes that this ACL should - apply to. - "permission": "str" # - Optional. Permission set applied to the ACL. 'read' - allows user to read from the index. 'write' allows for - user to write to the index. 'readwrite' allows for both - 'read' and 'write' permission. 
'deny'(default) restricts - user from performing any operation over an index. 'admin' - allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", - "read", "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For - Postgres clusters, set to ``true`` for a user with replication - rights. This option is not currently supported for other database - engines. - } - } - ], - "version": "str", # Optional. A string representing the version of - the database engine in use for the cluster. - "version_end_of_availability": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be available - for creating new clusters. If null, the version does not have an end of - availability timeline. - "version_end_of_life": "str" # Optional. A timestamp referring to - the date when the particular version will no longer be supported. If null, - the version does not have an end of life timeline. - } + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def update_cluster_size( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Resize a Database Cluster. + + To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The + body of the request must specify both the size and num_nodes attributes. 
+ A successful request will receive a 202 Accepted status code with no body in response. Querying + the database cluster will show that its status attribute will now be set to resizing. This will + transition back to online when the resize operation has completed. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "num_nodes": 0, # The number of nodes in the database cluster. Valid values + are are 1-3. In addition to the primary node, up to two standby nodes may be + added for highly available configurations. Required. + "size": "str", # A slug identifier representing desired the size of the + nodes in the database cluster. Required. + "storage_size_mib": 0 # Optional. Additional storage added to the cluster, + in MiB. If null, no additional storage is added to the cluster, beyond what is + provided as a base amount from the 'size' and any previously added additional + storage. + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_update_cluster_size_request( + database_cluster_uuid=database_cluster_uuid, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 202: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", 
response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_firewall_rules(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """List Firewall Rules (Trusted Sources) for a Database Cluster. + + To list all of a database cluster's firewall rules (known as "trusted sources" in the control + panel), send a GET request to ``/v2/databases/$DATABASE_ID/firewall``. + The result will be a JSON object with a ``rules`` key. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "rules": [ + { + "type": "str", # The type of resource that the firewall rule + allows to access the database cluster. Required. Known values are: + "droplet", "k8s", "ip_addr", "tag", and "app". + "value": "str", # The ID of the specific resource, the name + of a tag applied to a group of resources, or the IP address that the + firewall rule allows to access the database cluster. Required. + "cluster_uuid": "str", # Optional. A unique ID for the + database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. 
A time + value given in ISO8601 combined date and time format that represents when + the firewall rule was created. + "description": "str", # Optional. A human-readable + description of the rule. + "uuid": "str" # Optional. A unique ID for the firewall rule + itself. + } + ] } # response body for status code(s): 404 response == { @@ -119496,41 +128405,228 @@ def create_cluster( tickets to help identify the issue. } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @distributed_trace - def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_databases_list_firewall_rules_request( + database_cluster_uuid=database_cluster_uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", 
response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def update_firewall_rules( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Create a New Database Cluster. + """Update Firewall Rules (Trusted Sources) for a Database. - To create a database cluster, send a POST request to ``/v2/databases``. To see a list of - options for each engine, such as available regions, size slugs, and versions, send a GET - request to the ``/v2/databases/options`` endpoint. The available sizes for the - ``storage_size_mib`` field depends on the cluster's size. To see a list of available sizes, see - `Managed Database Pricing `_. + To update a database cluster's firewall rules (known as "trusted sources" in the control + panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which + resources should be able to open connections to the database. You may limit connections to + specific Droplets, Kubernetes clusters, or IP addresses. 
When a tag is provided, any Droplet or + Kubernetes node with that tag applied to it will have access. The firewall is limited to 100 + rules (or trusted sources). When possible, we recommend `placing your databases into a VPC + network `_ to limit access to them + instead of using a firewall. + A successful. - The create response returns a JSON object with a key called ``database``. The value of this is - an object that contains the standard attributes associated with a database cluster. The initial - value of the database cluster's ``status`` attribute is ``creating``. When the cluster is ready - to receive traffic, this changes to ``online``. + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: - The embedded ``connection`` and ``private_connection`` objects contains the information needed - to access the database cluster. For multi-node clusters, the ``standby_connection`` and - ``standby_private_connection`` objects contain the information needed to connect to the - cluster's standby node(s). + Example: + .. code-block:: python - DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To - create a new database cluster based on a backup of an existing cluster, send a POST request to - ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must - include a key named ``backup_restore`` with the name of the original database cluster and the - timestamp of the backup to be restored. Creating a database from a backup is the same as - forking a database in the control panel. 
- Note: Caching cluster creates are no longer supported as of 2025-04-30T00:00:00Z. Backups are - also not supported for Caching or Valkey clusters. + # JSON input template you can fill out and use as your body input. + body = { + "rules": [ + { + "type": "str", # The type of resource that the firewall rule + allows to access the database cluster. Required. Known values are: + "droplet", "k8s", "ip_addr", "tag", and "app". + "value": "str", # The ID of the specific resource, the name + of a tag applied to a group of resources, or the IP address that the + firewall rule allows to access the database cluster. Required. + "cluster_uuid": "str", # Optional. A unique ID for the + database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the firewall rule was created. + "description": "str", # Optional. A human-readable + description of the rule. + "uuid": "str" # Optional. A unique ID for the firewall rule + itself. + } + ] + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def update_firewall_rules( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Update Firewall Rules (Trusted Sources) for a Database. 
+ + To update a database cluster's firewall rules (known as "trusted sources" in the control + panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which + resources should be able to open connections to the database. You may limit connections to + specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or + Kubernetes node with that tag applied to it will have access. The firewall is limited to 100 + rules (or trusted sources). When possible, we recommend `placing your databases into a VPC + network `_ to limit access to them + instead of using a firewall. + A successful. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def update_firewall_rules( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Update Firewall Rules (Trusted Sources) for a Database. 
+ + To update a database cluster's firewall rules (known as "trusted sources" in the control + panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which + resources should be able to open connections to the database. You may limit connections to + specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or + Kubernetes node with that tag applied to it will have access. The firewall is limited to 100 + rules (or trusted sources). When possible, we recommend `placing your databases into a VPC + network `_ to limit access to them + instead of using a firewall. + A successful. + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -119538,117 +128634,6 @@ def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # JSON input template you can fill out and use as your body input. body = { - "engine": "str", # A slug representing the database engine used for the - cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, "redis" - for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" for - OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", "mysql", - "redis", "valkey", "mongodb", "kafka", and "opensearch". - "name": "str", # A unique, human-readable name referring to a database - cluster. Required. - "num_nodes": 0, # The number of nodes in the database cluster. Required. - "region": "str", # The slug identifier for the region where the database - cluster is located. Required. - "size": "str", # The slug identifier representing the size of the nodes in - the database cluster. Required. 
- "autoscale": { - "storage": { - "enabled": bool, # Whether storage autoscaling is enabled - for the cluster. Required. - "increment_gib": 0, # Optional. The amount of additional - storage to add (in GiB) when autoscaling is triggered. - "threshold_percent": 0 # Optional. The storage usage - threshold percentage that triggers autoscaling. When storage usage - exceeds this percentage, additional storage will be added automatically. - } - }, - "backup_restore": { - "database_name": "str", # The name of an existing database cluster - from which the backup will be restored. Required. - "backup_created_at": "2020-02-20 00:00:00" # Optional. The timestamp - of an existing database cluster backup in ISO8601 combined date and time - format. The most recent backup will be used if excluded. - }, - "connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given in - ISO8601 combined date and time format that represents when the database cluster - was created. - "db_names": [ - "str" # Optional. An array of strings containing the names of - databases created in the database cluster. - ], - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the database - cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify and - reference a database cluster. - "maintenance_window": { - "day": "str", # The day of the week on which to apply maintenance - updates. Required. - "hour": "str", # The hour in UTC at which maintenance updates will - be applied in 24 hour format. Required. - "description": [ - "str" # Optional. A list of strings, each containing - information about a pending maintenance update. - ], - "pending": bool # Optional. A boolean value indicating whether any - maintenance is scheduled to be performed in the next window. - }, - "metrics_endpoints": [ - { - "host": "str", # Optional. A FQDN pointing to the database - cluster's node(s). - "port": 0 # Optional. The port on which a service is - listening. - } - ], - "private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_network_uuid": "str", # Optional. A string specifying the UUID of - the VPC to which the database cluster will be assigned. If excluded, the cluster - when creating a new database cluster, it will be assigned to your account's - default VPC for the region. :code:`
`:code:`
`Requires ``vpc:read`` scope. - "project_id": "str", # Optional. The ID of the project that the database - cluster is assigned to. If excluded when creating a new database cluster, it will - be assigned to your default project.:code:`
`:code:`
`Requires - ``project:update`` scope. "rules": [ { "type": "str", # The type of resource that the firewall rule @@ -119667,482 +128652,245 @@ def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: "uuid": "str" # Optional. A unique ID for the firewall rule itself. } - ], - "schema_registry_connection": { - "host": "str", # Optional. The FQDN pointing to the schema registry - connection uri. - "password": "str", # Optional. The randomly generated password for - the schema registry.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the schema registry is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the schema - registry.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "semantic_version": "str", # Optional. A string representing the semantic - version of the database engine in use for the cluster. - "standby_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "status": "str", # Optional. A string representing the current status of the - database cluster. Known values are: "creating", "online", "resizing", - "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the cluster, - in MiB. If null, no additional storage is added to the cluster, beyond what is - provided as a base amount from the 'size' and any previously added additional - storage. - "tags": [ - "str" # Optional. An array of tags (as strings) to apply to the - database cluster. :code:`
`:code:`
`Requires ``tag:create`` scope. - ], - "ui_connection": { - "host": "str", # Optional. The FQDN pointing to the opensearch - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the opensearch dashboard is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the opensearch - dashboard.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "users": [ - { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS - client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL - 8.0, the default is ``caching_sha2_password``. Required. Known values - are: "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password - for the database user.:code:`
`Requires ``database:view_credentials`` - scope. - "role": "str", # Optional. A string representing the - database user's role. The value will be either "primary" or "normal". - Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission - set applied to the ACL. 'consume' allows for messages to be - consumed from the topic. 'produce' allows for messages to be - published to the topic. 'produceconsume' allows for both - 'consume' and 'produce' permission. 'admin' allows for - 'produceconsume' as well as any operations to administer the - topic (delete, update). Required. Known values are: "admin", - "consume", "produce", and "produceconsume". - "topic": "str", # A regex for - matching the topic(s) that this ACL should apply to. - Required. - "id": "str" # Optional. An - identifier for the ACL. Will be computed after the ACL is - created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of - databases to which the user should have access. When the - database is set to ``admin``"" , the user will have access to - all databases based on the user's role i.e. a user with the - role ``readOnly`` assigned to the ``admin`` database will - have read access to all databases. - ], - "role": "str" # Optional. The role to assign - to the user with each role mapping to a MongoDB built-in role. - ``readOnly`` maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex - for matching the indexes that this ACL should apply to. - "permission": "str" # Optional. - Permission set applied to the ACL. 'read' allows user to read - from the index. 'write' allows for user to write to the - index. 'readwrite' allows for both 'read' and 'write' - permission. 
'deny'(default) restricts user from performing - any operation over an index. 'admin' allows for 'readwrite' - as well as any operations to administer the index. Known - values are: "deny", "admin", "read", "readwrite", and - "write". - } - ], - "pg_allow_replication": bool # Optional. For - Postgres clusters, set to ``true`` for a user with replication - rights. This option is not currently supported for other database - engines. - } - } - ], - "version": "str", # Optional. A string representing the version of the - database engine in use for the cluster. - "version_end_of_availability": "str", # Optional. A timestamp referring to - the date when the particular version will no longer be available for creating new - clusters. If null, the version does not have an end of availability timeline. - "version_end_of_life": "str" # Optional. A timestamp referring to the date - when the particular version will no longer be supported. If null, the version - does not have an end of life timeline. + ] } - # response body for status code(s): 201 + # response body for status code(s): 404 response == { - "database": { - "engine": "str", # A slug representing the database engine used for - the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" - for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", - "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch". - "name": "str", # A unique, human-readable name referring to a - database cluster. Required. - "num_nodes": 0, # The number of nodes in the database cluster. - Required. - "region": "str", # The slug identifier for the region where the - database cluster is located. Required. - "size": "str", # The slug identifier representing the size of the - nodes in the database cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. 
The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "db_names": [ - "str" # Optional. An array of strings containing the names - of databases created in the database cluster. - ], - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the - database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database cluster. - "maintenance_window": { - "day": "str", # The day of the week on which to apply - maintenance updates. Required. - "hour": "str", # The hour in UTC at which maintenance - updates will be applied in 24 hour format. Required. - "description": [ - "str" # Optional. A list of strings, each containing - information about a pending maintenance update. - ], - "pending": bool # Optional. A boolean value indicating - whether any maintenance is scheduled to be performed in the next window. - }, - "metrics_endpoints": [ - { - "host": "str", # Optional. A FQDN pointing to the - database cluster's node(s). - "port": 0 # Optional. The port on which a service is - listening. - } - ], - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the database cluster will be assigned. If excluded, - the cluster when creating a new database cluster, it will be assigned to your - account's default VPC for the region. :code:`
`:code:`
`Requires - ``vpc:read`` scope. - "project_id": "str", # Optional. The ID of the project that the - database cluster is assigned to. If excluded when creating a new database - cluster, it will be assigned to your default - project.:code:`
`:code:`
`Requires ``project:read`` scope. - "rules": [ - { - "type": "str", # The type of resource that the - firewall rule allows to access the database cluster. Required. Known - values are: "droplet", "k8s", "ip_addr", "tag", and "app". - "value": "str", # The ID of the specific resource, - the name of a tag applied to a group of resources, or the IP address - that the firewall rule allows to access the database cluster. - Required. - "cluster_uuid": "str", # Optional. A unique ID for - the database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A - time value given in ISO8601 combined date and time format that - represents when the firewall rule was created. - "description": "str", # Optional. A human-readable - description of the rule. - "uuid": "str" # Optional. A unique ID for the - firewall rule itself. - } - ], - "schema_registry_connection": { - "host": "str", # Optional. The FQDN pointing to the schema - registry connection uri. - "password": "str", # Optional. The randomly generated - password for the schema registry.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the schema registry - is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience - and should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the schema - registry.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "semantic_version": "str", # Optional. A string representing the - semantic version of the database engine in use for the cluster. - "standby_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the - cluster, in MiB. If null, no additional storage is added to the cluster, - beyond what is provided as a base amount from the 'size' and any previously - added additional storage. - "tags": [ - "str" # Optional. An array of tags that have been applied to - the database cluster. :code:`
`:code:`
`Requires ``tag:read`` - scope. - ], - "ui_connection": { - "host": "str", # Optional. The FQDN pointing to the - opensearch cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the opensearch - dashboard is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience - and should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - opensearch dashboard.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "users": [ - { - "name": "str", # The name of a database user. - Required. - "access_cert": "str", # Optional. Access certificate - for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS - client authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying - the authentication method to be used for connections to the MySQL - user account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, - the default for the version of MySQL in use will be used. As of - MySQL 8.0, the default is ``caching_sha2_password``. Required. - Known values are: "mysql_native_password" and - "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated - password for the database user.:code:`
`Requires - ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the - database user's role. The value will be either "primary" or "normal". - Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # - Permission set applied to the ACL. 'consume' allows for - messages to be consumed from the topic. 'produce' allows - for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' - permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, - update). Required. Known values are: "admin", "consume", - "produce", and "produceconsume". - "topic": "str", # A regex - for matching the topic(s) that this ACL should apply to. - Required. - "id": "str" # Optional. An - identifier for the ACL. Will be computed after the ACL is - created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of - databases to which the user should have access. When the - database is set to ``admin``"" , the user will have - access to all databases based on the user's role i.e. a - user with the role ``readOnly`` assigned to the ``admin`` - database will have read access to all databases. - ], - "role": "str" # Optional. The role - to assign to the user with each role mapping to a MongoDB - built-in role. ``readOnly`` maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and - "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. - A regex for matching the indexes that this ACL should - apply to. - "permission": "str" # - Optional. Permission set applied to the ACL. 'read' - allows user to read from the index. 'write' allows for - user to write to the index. 'readwrite' allows for both - 'read' and 'write' permission. 
'deny'(default) restricts - user from performing any operation over an index. 'admin' - allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", - "read", "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For - Postgres clusters, set to ``true`` for a user with replication - rights. This option is not currently supported for other database - engines. - } - } - ], - "version": "str", # Optional. A string representing the version of - the database engine in use for the cluster. - "version_end_of_availability": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be available - for creating new clusters. If null, the version does not have an end of - availability timeline. - "version_end_of_life": "str" # Optional. A timestamp referring to - the date when the particular version will no longer be supported. If null, - the version does not have an end of life timeline. - } + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_update_firewall_rules_request( + database_cluster_uuid=database_cluster_uuid, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", 
response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def update_maintenance_window( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Configure a Database Cluster's Maintenance Window. + + To configure the window when automatic maintenance should be performed for a database cluster, + send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``. + A successful request will receive a 204 No Content status code with no body in response. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "day": "str", # The day of the week on which to apply maintenance updates. + Required. + "hour": "str", # The hour in UTC at which maintenance updates will be + applied in 24 hour format. Required. + "description": [ + "str" # Optional. 
A list of strings, each containing information + about a pending maintenance update. + ], + "pending": bool # Optional. A boolean value indicating whether any + maintenance is scheduled to be performed in the next window. + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def update_maintenance_window( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Configure a Database Cluster's Maintenance Window. + + To configure the window when automatic maintenance should be performed for a database cluster, + send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``. + A successful request will receive a 204 No Content status code with no body in response. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. 
For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def update_maintenance_window( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Configure a Database Cluster's Maintenance Window. + + To configure the window when automatic maintenance should be performed for a database cluster, + send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``. + A successful request will receive a 204 No Content status code with no body in response. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "day": "str", # The day of the week on which to apply maintenance updates. + Required. + "hour": "str", # The hour in UTC at which maintenance updates will be + applied in 24 hour format. Required. + "description": [ + "str" # Optional. A list of strings, each containing information + about a pending maintenance update. + ], + "pending": bool # Optional. A boolean value indicating whether any + maintenance is scheduled to be performed in the next window. 
} + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -120174,7 +128922,7 @@ def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -120184,7 +128932,8 @@ def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: else: _json = body - _request = build_databases_create_cluster_request( + _request = build_databases_update_maintenance_window_request( + database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, content=_content, @@ -120202,14 +128951,15 @@ def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 201: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -120220,11 +128970,6 @@ def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -120242,340 +128987,30 @@ def create_cluster(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> 
JSON: deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @distributed_trace - def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + def install_update( + self, database_cluster_uuid: str, **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Retrieve an Existing Database Cluster. - - To show information about an existing database cluster, send a GET request to - ``/v2/databases/$DATABASE_ID``. - - The response will be a JSON object with a database key. This will be set to an object - containing the standard database cluster attributes. - - The embedded ``connection`` and ``private_connection`` objects will contain the information - needed to access the database cluster. For multi-node clusters, the ``standby_connection`` and - ``standby_private_connection`` objects contain the information needed to connect to the - cluster's standby node(s). + """Start Database Maintenance. - The embedded maintenance_window object will contain information about any scheduled maintenance - for the database cluster. + To start the installation of updates for a database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/install_update``. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "database": { - "engine": "str", # A slug representing the database engine used for - the cluster. 
The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Caching, "mongodb" for MongoDB, "kafka" for Kafka, "opensearch" - for OpenSearch, and "valkey" for Valkey. Required. Known values are: "pg", - "mysql", "redis", "valkey", "mongodb", "kafka", and "opensearch". - "name": "str", # A unique, human-readable name referring to a - database cluster. Required. - "num_nodes": 0, # The number of nodes in the database cluster. - Required. - "region": "str", # The slug identifier for the region where the - database cluster is located. Required. - "size": "str", # The slug identifier representing the size of the - nodes in the database cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "db_names": [ - "str" # Optional. An array of strings containing the names - of databases created in the database cluster. - ], - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the - database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database cluster. - "maintenance_window": { - "day": "str", # The day of the week on which to apply - maintenance updates. Required. - "hour": "str", # The hour in UTC at which maintenance - updates will be applied in 24 hour format. Required. - "description": [ - "str" # Optional. A list of strings, each containing - information about a pending maintenance update. - ], - "pending": bool # Optional. A boolean value indicating - whether any maintenance is scheduled to be performed in the next window. - }, - "metrics_endpoints": [ - { - "host": "str", # Optional. A FQDN pointing to the - database cluster's node(s). - "port": 0 # Optional. The port on which a service is - listening. - } - ], - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the database cluster will be assigned. If excluded, - the cluster when creating a new database cluster, it will be assigned to your - account's default VPC for the region. :code:`
`:code:`
`Requires - ``vpc:read`` scope. - "project_id": "str", # Optional. The ID of the project that the - database cluster is assigned to. If excluded when creating a new database - cluster, it will be assigned to your default - project.:code:`
`:code:`
`Requires ``project:read`` scope. - "rules": [ - { - "type": "str", # The type of resource that the - firewall rule allows to access the database cluster. Required. Known - values are: "droplet", "k8s", "ip_addr", "tag", and "app". - "value": "str", # The ID of the specific resource, - the name of a tag applied to a group of resources, or the IP address - that the firewall rule allows to access the database cluster. - Required. - "cluster_uuid": "str", # Optional. A unique ID for - the database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A - time value given in ISO8601 combined date and time format that - represents when the firewall rule was created. - "description": "str", # Optional. A human-readable - description of the rule. - "uuid": "str" # Optional. A unique ID for the - firewall rule itself. - } - ], - "schema_registry_connection": { - "host": "str", # Optional. The FQDN pointing to the schema - registry connection uri. - "password": "str", # Optional. The randomly generated - password for the schema registry.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the schema registry - is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience - and should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the schema - registry.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "semantic_version": "str", # Optional. A string representing the - semantic version of the database engine in use for the cluster. - "standby_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the - cluster, in MiB. If null, no additional storage is added to the cluster, - beyond what is provided as a base amount from the 'size' and any previously - added additional storage. - "tags": [ - "str" # Optional. An array of tags that have been applied to - the database cluster. :code:`
`:code:`
`Requires ``tag:read`` - scope. - ], - "ui_connection": { - "host": "str", # Optional. The FQDN pointing to the - opensearch cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the opensearch - dashboard is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. This is provided as a convenience - and should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - opensearch dashboard.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "users": [ - { - "name": "str", # The name of a database user. - Required. - "access_cert": "str", # Optional. Access certificate - for TLS client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS - client authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying - the authentication method to be used for connections to the MySQL - user account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, - the default for the version of MySQL in use will be used. As of - MySQL 8.0, the default is ``caching_sha2_password``. Required. - Known values are: "mysql_native_password" and - "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated - password for the database user.:code:`
`Requires - ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the - database user's role. The value will be either "primary" or "normal". - Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # - Permission set applied to the ACL. 'consume' allows for - messages to be consumed from the topic. 'produce' allows - for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' - permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, - update). Required. Known values are: "admin", "consume", - "produce", and "produceconsume". - "topic": "str", # A regex - for matching the topic(s) that this ACL should apply to. - Required. - "id": "str" # Optional. An - identifier for the ACL. Will be computed after the ACL is - created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of - databases to which the user should have access. When the - database is set to ``admin``"" , the user will have - access to all databases based on the user's role i.e. a - user with the role ``readOnly`` assigned to the ``admin`` - database will have read access to all databases. - ], - "role": "str" # Optional. The role - to assign to the user with each role mapping to a MongoDB - built-in role. ``readOnly`` maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and - "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. - A regex for matching the indexes that this ACL should - apply to. - "permission": "str" # - Optional. Permission set applied to the ACL. 'read' - allows user to read from the index. 'write' allows for - user to write to the index. 'readwrite' allows for both - 'read' and 'write' permission. 
'deny'(default) restricts - user from performing any operation over an index. 'admin' - allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", - "read", "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For - Postgres clusters, set to ``true`` for a user with replication - rights. This option is not currently supported for other database - engines. - } - } - ], - "version": "str", # Optional. A string representing the version of - the database engine in use for the cluster. - "version_end_of_availability": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be available - for creating new clusters. If null, the version does not have an end of - availability timeline. - "version_end_of_life": "str" # Optional. A timestamp referring to - the date when the particular version will no longer be supported. If null, - the version does not have an end of life timeline. 
- } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -120604,9 +129039,9 @@ def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_get_cluster_request( + _request = build_databases_install_update_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -120622,14 +129057,15 @@ def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -120640,11 +129076,6 @@ def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -120662,30 +129093,56 @@ def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + 
return deserialized # type: ignore @distributed_trace - def destroy_cluster( - self, database_cluster_uuid: str, **kwargs: Any - ) -> Optional[JSON]: + def list_backups(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Destroy a Database Cluster. + """List Backups for a Database Cluster. - To destroy a specific database, send a DELETE request to ``/v2/databases/$DATABASE_ID``. - A status of 204 will be given. This indicates that the request was processed successfully, but - that no response body is needed. + To list all of the available backups of a PostgreSQL or MySQL database cluster, send a GET + request to ``/v2/databases/$DATABASE_ID/backups``. + **Note**\\ : Backups are not supported for Caching or Valkey clusters. + The result will be a JSON object with a ``backups key``. This will be set to an array of backup + objects, each of which will contain the size of the backup and the timestamp at which it was + created. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 200 + response == { + "backups": [ + { + "created_at": "2020-02-20 00:00:00", # A time value given in + ISO8601 combined date and time format at which the backup was created. + Required. + "size_gigabytes": 0.0, # The size of the database backup in + GBs. Required. + "incremental": bool # Optional. Indicates if this backup is + a full or an incremental one (available only for MySQL). + } + ], + "backup_progress": "str", # Optional. If a backup is currently in progress, + this attribute shows the percentage of completion. If no backup is in progress, + this attribute will be hidden. + "scheduled_backup_time": { + "backup_hour": 0, # Optional. 
The hour of the day when the backup is + scheduled (in UTC). + "backup_interval_hours": 0, # Optional. The frequency, in hours, at + which backups are taken. + "backup_minute": 0 # Optional. The minute of the hour when the + backup is scheduled. + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -120714,9 +129171,9 @@ def destroy_cluster( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_destroy_cluster_request( + _request = build_databases_list_backups_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -120732,15 +129189,14 @@ def destroy_cluster( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -120751,6 +129207,11 @@ def destroy_cluster( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -120768,19 +129229,22 @@ def destroy_cluster( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return 
deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Retrieve an Existing Database Cluster Configuration. + """List All Read-only Replicas. - Shows configuration parameters for an existing database cluster by sending a GET request to - ``/v2/databases/$DATABASE_ID/config``. - The response is a JSON object with a ``config`` key, which is set to an object - containing any database configuration parameters. + To list all of the read-only replicas associated with a database cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/replicas``. + + **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + + The result will be a JSON object with a ``replicas`` key. This will be set to an array of + database replica objects, each of which will contain the standard database replica attributes. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -120793,7 +129257,91 @@ def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "config": {} + "replicas": [ + { + "name": "str", # The name to give the read-only replicating. + Required. + "connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the database cluster was created. + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs + for the database cluster. Each CNAME must be a valid RFC 1123 + hostname (e.g., "db.example.com"). Maximum of 16 CNAMEs allowed, + each up to 253 characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to + identify and reference a database replica. + "private_connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "private_network_uuid": "str", # Optional. A string + specifying the UUID of the VPC to which the read-only replica will be + assigned. If excluded, the replica will be assigned to your account's + default VPC for the region. :code:`
`:code:`
`Requires ``vpc:read`` + scope. + "region": "str", # Optional. A slug identifier for the + region where the read-only replica will be located. If excluded, the + replica will be placed in the same region as the cluster. + "size": "str", # Optional. A slug identifier representing + the size of the node for the read-only replica. The size of the replica + must be at least as large as the node size for the database cluster from + which it is replicating. + "status": "str", # Optional. A string representing the + current status of the database cluster. Known values are: "creating", + "online", "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added + to the cluster, in MiB. If null, no additional storage is added to the + cluster, beyond what is provided as a base amount from the 'size' and any + previously added additional storage. + "tags": [ + "str" # Optional. A flat array of tag names as + strings applied to the read-only + replica.:code:`
`:code:`
`Requires ``tag:read`` scope. + ] + } + ] } # response body for status code(s): 404 response == { @@ -120825,7 +129373,7 @@ def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_config_request( + _request = build_databases_list_replicas_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -120886,29 +129434,37 @@ def get_config(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @overload - def patch_config( + def create_replica( self, database_cluster_uuid: str, - body: JSON, + body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any, - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Update the Database Configuration for an Existing Database. + """Create a Read-only Replica. - To update the configuration for an existing database cluster, send a PATCH request to - ``/v2/databases/$DATABASE_ID/config``. + To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request + to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of + the node to be used, and the region where it will be located. + + **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + + The response will be a JSON object with a key called ``replica``. The value of this will be an + object that contains the standard attributes associated with a database replica. The initial + value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is + ready to receive traffic, this will transition to ``active``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. + :param body: Default value is None. 
:type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -120916,9 +129472,161 @@ def patch_config( # JSON input template you can fill out and use as your body input. body = { - "config": {} + "name": "str", # The name to give the read-only replicating. Required. + "connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given in + ISO8601 combined date and time format that represents when the database cluster + was created. + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the database + cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify and + reference a database replica. + "private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the UUID of + the VPC to which the read-only replica will be assigned. If excluded, the replica + will be assigned to your account's default VPC for the region. + :code:`
`:code:`
`Requires ``vpc:read`` scope. + "region": "str", # Optional. A slug identifier for the region where the + read-only replica will be located. If excluded, the replica will be placed in the + same region as the cluster. + "size": "str", # Optional. A slug identifier representing the size of the + node for the read-only replica. The size of the replica must be at least as large + as the node size for the database cluster from which it is replicating. + "status": "str", # Optional. A string representing the current status of the + database cluster. Known values are: "creating", "online", "resizing", + "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the cluster, + in MiB. If null, no additional storage is added to the cluster, beyond what is + provided as a base amount from the 'size' and any previously added additional + storage. + "tags": [ + "str" # Optional. A flat array of tag names as strings to apply to + the read-only replica after it is created. Tag names can either be existing + or new tags. :code:`
`:code:`
`Requires ``tag:create`` scope. + ] } + # response body for status code(s): 201 + response == { + "replica": { + "name": "str", # The name to give the read-only replicating. + Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the + database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database replica. + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the read-only replica will be assigned. If excluded, + the replica will be assigned to your account's default VPC for the region. + :code:`
`:code:`
`Requires ``vpc:read`` scope. + "region": "str", # Optional. A slug identifier for the region where + the read-only replica will be located. If excluded, the replica will be + placed in the same region as the cluster. + "size": "str", # Optional. A slug identifier representing the size + of the node for the read-only replica. The size of the replica must be at + least as large as the node size for the database cluster from which it is + replicating. + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the + cluster, in MiB. If null, no additional storage is added to the cluster, + beyond what is provided as a base amount from the 'size' and any previously + added additional storage. + "tags": [ + "str" # Optional. A flat array of tag names as strings + applied to the read-only replica.:code:`
`:code:`
`Requires + ``tag:read`` scope. + ] + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -120933,34 +129641,123 @@ def patch_config( """ @overload - def patch_config( + def create_replica( self, database_cluster_uuid: str, - body: IO[bytes], + body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any, - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Update the Database Configuration for an Existing Database. + """Create a Read-only Replica. - To update the configuration for an existing database cluster, send a PATCH request to - ``/v2/databases/$DATABASE_ID/config``. + To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request + to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of + the node to be used, and the region where it will be located. + + **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + + The response will be a JSON object with a key called ``replica``. The value of this will be an + object that contains the standard attributes associated with a database replica. The initial + value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is + ready to receive traffic, this will transition to ``active``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. + :param body: Default value is None. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. 
code-block:: python + # response body for status code(s): 201 + response == { + "replica": { + "name": "str", # The name to give the read-only replicating. + Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the + database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database replica. + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the read-only replica will be assigned. If excluded, + the replica will be assigned to your account's default VPC for the region. + :code:`
`:code:`
`Requires ``vpc:read`` scope. + "region": "str", # Optional. A slug identifier for the region where + the read-only replica will be located. If excluded, the replica will be + placed in the same region as the cluster. + "size": "str", # Optional. A slug identifier representing the size + of the node for the read-only replica. The size of the replica must be at + least as large as the node size for the database cluster from which it is + replicating. + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the + cluster, in MiB. If null, no additional storage is added to the cluster, + beyond what is provided as a base amount from the 'size' and any previously + added additional storage. + "tags": [ + "str" # Optional. A flat array of tag names as strings + applied to the read-only replica.:code:`
`:code:`
`Requires + ``tag:read`` scope. + ] + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -120975,21 +129772,32 @@ def patch_config( """ @distributed_trace - def patch_config( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: + def create_replica( + self, + database_cluster_uuid: str, + body: Optional[Union[JSON, IO[bytes]]] = None, + **kwargs: Any, + ) -> JSON: # pylint: disable=line-too-long - """Update the Database Configuration for an Existing Database. + """Create a Read-only Replica. - To update the configuration for an existing database cluster, send a PATCH request to - ``/v2/databases/$DATABASE_ID/config``. + To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request + to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of + the node to be used, and the region where it will be located. + + **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + + The response will be a JSON object with a key called ``replica``. The value of this will be an + object that contains the standard attributes associated with a database replica. The initial + value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is + ready to receive traffic, this will transition to ``active``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. 
:type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -120997,9 +129805,161 @@ def patch_config( # JSON input template you can fill out and use as your body input. body = { - "config": {} + "name": "str", # The name to give the read-only replicating. Required. + "connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given in + ISO8601 combined date and time format that represents when the database cluster + was created. + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the database + cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify and + reference a database replica. + "private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the UUID of + the VPC to which the read-only replica will be assigned. If excluded, the replica + will be assigned to your account's default VPC for the region. + :code:`
`:code:`
`Requires ``vpc:read`` scope. + "region": "str", # Optional. A slug identifier for the region where the + read-only replica will be located. If excluded, the replica will be placed in the + same region as the cluster. + "size": "str", # Optional. A slug identifier representing the size of the + node for the read-only replica. The size of the replica must be at least as large + as the node size for the database cluster from which it is replicating. + "status": "str", # Optional. A string representing the current status of the + database cluster. Known values are: "creating", "online", "resizing", + "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the cluster, + in MiB. If null, no additional storage is added to the cluster, beyond what is + provided as a base amount from the 'size' and any previously added additional + storage. + "tags": [ + "str" # Optional. A flat array of tag names as strings to apply to + the read-only replica after it is created. Tag names can either be existing + or new tags. :code:`
`:code:`
`Requires ``tag:create`` scope. + ] } + # response body for status code(s): 201 + response == { + "replica": { + "name": "str", # The name to give the read-only replicating. + Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the + database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database replica. + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the read-only replica will be assigned. If excluded, + the replica will be assigned to your account's default VPC for the region. + :code:`
`:code:`
`Requires ``vpc:read`` scope. + "region": "str", # Optional. A slug identifier for the region where + the read-only replica will be located. If excluded, the replica will be + placed in the same region as the cluster. + "size": "str", # Optional. A slug identifier representing the size + of the node for the read-only replica. The size of the replica must be at + least as large as the node size for the database cluster from which it is + replicating. + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the + cluster, in MiB. If null, no additional storage is added to the cluster, + beyond what is provided as a base amount from the 'size' and any previously + added additional storage. + "tags": [ + "str" # Optional. A flat array of tag names as strings + applied to the read-only replica.:code:`
`:code:`
`Requires + ``tag:read`` scope. + ] + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -121031,7 +129991,7 @@ def patch_config( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -121039,9 +129999,12 @@ def patch_config( if isinstance(body, (IOBase, bytes)): _content = body else: - _json = body + if body is not None: + _json = body + else: + _json = None - _request = build_databases_patch_config_request( + _request = build_databases_create_replica_request( database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, @@ -121060,15 +130023,14 @@ def patch_config( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [201, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -121079,6 +130041,11 @@ def patch_config( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -121096,21 +130063,19 @@ def patch_config( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), 
response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_ca(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Retrieve the Public Certificate. + """List all Events Logs. - To retrieve the public certificate used to secure the connection to the database cluster send a - GET request to - ``/v2/databases/$DATABASE_ID/ca``. + To list all of the cluster events, send a GET request to + ``/v2/databases/$DATABASE_ID/events``. - The response will be a JSON object with a ``ca`` key. This will be set to an object - containing the base64 encoding of the public key certificate. + The result will be a JSON object with a ``events`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -121123,10 +130088,18 @@ def get_ca(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "ca": { - "certificate": "str" # base64 encoding of the certificate used to - secure database connections. Required. - } + "events": [ + { + "cluster_name": "str", # Optional. The name of cluster. + "create_time": "str", # Optional. The time of the generation + of a event. + "event_type": "str", # Optional. Type of the event. Known + values are: "cluster_maintenance_perform", "cluster_master_promotion", + "cluster_create", "cluster_update", "cluster_delete", "cluster_poweron", + and "cluster_poweroff". + "id": "str" # Optional. ID of the particular event. 
+ } + ] } # response body for status code(s): 404 response == { @@ -121158,7 +130131,7 @@ def get_ca(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_ca_request( + _request = build_databases_list_events_logs_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -121219,15 +130192,24 @@ def get_ca(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_migration_status(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + def get_replica( + self, database_cluster_uuid: str, replica_name: str, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Retrieve the Status of an Online Migration. + """Retrieve an Existing Read-only Replica. - To retrieve the status of the most recent online migration, send a GET request to - ``/v2/databases/$DATABASE_ID/online-migration``. + To show information about an existing database replica, send a GET request to + ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME``. + + **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + + The response will be a JSON object with a ``replica key``. This will be set to an object + containing the standard database replica attributes. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param replica_name: The name of the database replica. Required. + :type replica_name: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -121237,11 +130219,84 @@ def get_migration_status(self, database_cluster_uuid: str, **kwargs: Any) -> JSO # response body for status code(s): 200 response == { - "created_at": "str", # Optional. The time the migration was initiated, in - ISO 8601 format. - "id": "str", # Optional. The ID of the most recent migration. 
- "status": "str" # Optional. The current status of the migration. Known - values are: "running", "syncing", "canceled", "error", and "done". + "replica": { + "name": "str", # The name to give the read-only replicating. + Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "do_settings": { + "service_cnames": [ + "str" # Optional. An array of custom CNAMEs for the + database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., + "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 + characters. + ] + }, + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database replica. + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the read-only replica will be assigned. If excluded, + the replica will be assigned to your account's default VPC for the region. + :code:`
`:code:`
`Requires ``vpc:read`` scope. + "region": "str", # Optional. A slug identifier for the region where + the read-only replica will be located. If excluded, the replica will be + placed in the same region as the cluster. + "size": "str", # Optional. A slug identifier representing the size + of the node for the read-only replica. The size of the replica must be at + least as large as the node size for the database cluster from which it is + replicating. + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "storage_size_mib": 0, # Optional. Additional storage added to the + cluster, in MiB. If null, no additional storage is added to the cluster, + beyond what is provided as a base amount from the 'size' and any previously + added additional storage. + "tags": [ + "str" # Optional. A flat array of tag names as strings + applied to the read-only replica.:code:`
`:code:`
`Requires + ``tag:read`` scope. + ] + } } # response body for status code(s): 404 response == { @@ -121273,8 +130328,9 @@ def get_migration_status(self, database_cluster_uuid: str, **kwargs: Any) -> JSO cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_migration_status_request( + _request = build_databases_get_replica_request( database_cluster_uuid=database_cluster_uuid, + replica_name=replica_name, headers=_headers, params=_params, ) @@ -121333,205 +130389,32 @@ def get_migration_status(self, database_cluster_uuid: str, **kwargs: Any) -> JSO return cast(JSON, deserialized) # type: ignore - @overload - def update_online_migration( - self, - database_cluster_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Start an Online Migration. - - To start an online migration, send a PUT request to - ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a - connection with an existing cluster and replicates its contents to the target cluster. Online - migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters. - If the existing database is continuously being written to, the migration process will continue - for up to two weeks unless it is manually stopped. Online migration is only available for - `MySQL - `_\\ - , `PostgreSQL - `_\\ , `Caching - `_\\ , and `Valkey - `_ clusters. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. 
- body = { - "source": { - "dbname": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user. - "port": 0, # Optional. The port on which the database cluster is - listening. - "username": "str" # Optional. The default user for the database. - }, - "disable_ssl": bool, # Optional. Enables SSL encryption when connecting to - the source database. - "ignore_dbs": [ - "str" # Optional. List of databases that should be ignored during - migration. - ] - } - - # response body for status code(s): 200 - response == { - "created_at": "str", # Optional. The time the migration was initiated, in - ISO 8601 format. - "id": "str", # Optional. The ID of the most recent migration. - "status": "str" # Optional. The current status of the migration. Known - values are: "running", "syncing", "canceled", "error", and "done". - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - def update_online_migration( - self, - database_cluster_uuid: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: + @distributed_trace + def destroy_replica( + self, database_cluster_uuid: str, replica_name: str, **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Start an Online Migration. 
- - To start an online migration, send a PUT request to - ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a - connection with an existing cluster and replicates its contents to the target cluster. Online - migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters. - If the existing database is continuously being written to, the migration process will continue - for up to two weeks unless it is manually stopped. Online migration is only available for - `MySQL - `_\\ - , `PostgreSQL - `_\\ , `Caching - `_\\ , and `Valkey - `_ clusters. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python + """Destroy a Read-only Replica. - # response body for status code(s): 200 - response == { - "created_at": "str", # Optional. The time the migration was initiated, in - ISO 8601 format. - "id": "str", # Optional. The ID of the most recent migration. - "status": "str" # Optional. The current status of the migration. Known - values are: "running", "syncing", "canceled", "error", and "done". - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. 
Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ + To destroy a specific read-only replica, send a DELETE request to + ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME``. - @distributed_trace - def update_online_migration( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Start an Online Migration. + **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. - To start an online migration, send a PUT request to - ``/v2/databases/$DATABASE_ID/online-migration`` endpoint. Migrating a cluster establishes a - connection with an existing cluster and replicates its contents to the target cluster. Online - migration is only available for MySQL, PostgreSQL, Caching, and Valkey clusters. - If the existing database is continuously being written to, the migration process will continue - for up to two weeks unless it is manually stopped. Online migration is only available for - `MySQL - `_\\ - , `PostgreSQL - `_\\ , `Caching - `_\\ , and `Valkey - `_ clusters. + A status of 204 will be given. This indicates that the request was processed successfully, but + that no response body is needed. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON + :param replica_name: The name of the database replica. Required. + :type replica_name: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "source": { - "dbname": "str", # Optional. The name of the default database. - "host": "str", # Optional. 
The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user. - "port": 0, # Optional. The port on which the database cluster is - listening. - "username": "str" # Optional. The default user for the database. - }, - "disable_ssl": bool, # Optional. Enables SSL encryption when connecting to - the source database. - "ignore_dbs": [ - "str" # Optional. List of databases that should be ignored during - migration. - ] - } - - # response body for status code(s): 200 - response == { - "created_at": "str", # Optional. The time the migration was initiated, in - ISO 8601 format. - "id": "str", # Optional. The ID of the most recent migration. - "status": "str" # Optional. The current status of the migration. Known - values are: "running", "syncing", "canceled", "error", and "done". - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -121557,27 +130440,14 @@ def update_online_migration( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[JSON] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_update_online_migration_request( + _request = build_databases_destroy_replica_request( database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, + replica_name=replica_name, headers=_headers, params=_params, ) @@ -121592,14 +130462,15 @@ def update_online_migration( response 
= pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -121610,11 +130481,6 @@ def update_online_migration( "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -121632,27 +130498,29 @@ def update_online_migration( deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @distributed_trace - def delete_online_migration( - self, database_cluster_uuid: str, migration_id: str, **kwargs: Any + def promote_replica( + self, database_cluster_uuid: str, replica_name: str, **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Stop an Online Migration. + """Promote a Read-only Replica to become a Primary Cluster. - To stop an online migration, send a DELETE request to - ``/v2/databases/$DATABASE_ID/online-migration/$MIGRATION_ID``. + To promote a specific read-only replica, send a PUT request to + ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME/promote``. + + **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. A status of 204 will be given. 
This indicates that the request was processed successfully, but that no response body is needed. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param migration_id: A unique identifier assigned to the online migration. Required. - :type migration_id: str + :param replica_name: The name of the database replica. Required. + :type replica_name: str :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -121690,9 +130558,9 @@ def delete_online_migration( cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_delete_online_migration_request( + _request = build_databases_promote_replica_request( database_cluster_uuid=database_cluster_uuid, - migration_id=migration_id, + replica_name=replica_name, headers=_headers, params=_params, ) @@ -121747,141 +130615,119 @@ def delete_online_migration( return deserialized # type: ignore - @overload - def update_region( - self, - database_cluster_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> Optional[JSON]: + @distributed_trace + def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Migrate a Database Cluster to a New Region. - - To migrate a database cluster to a new region, send a ``PUT`` request to - ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a - ``region`` attribute. - - A successful request will receive a 202 Accepted status code with no body in - response. Querying the database cluster will show that its ``status`` attribute - will now be set to ``migrating``. This will transition back to ``online`` when the - migration has completed. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. 
- :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python + """List all Database Users. - # JSON input template you can fill out and use as your body input. - body = { - "region": "str" # A slug identifier for the region to which the database - cluster will be migrated. Required. - } + To list all of the users for your database cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/users``. - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ + Note: User management is not supported for Caching or Valkey clusters. - @overload - def update_region( - self, - database_cluster_uuid: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Migrate a Database Cluster to a New Region. + The result will be a JSON object with a ``users`` key. This will be set to an array + of database user objects, each of which will contain the standard database user attributes. + User passwords will not show without the ``database:view_credentials`` scope. - To migrate a database cluster to a new region, send a ``PUT`` request to - ``/v2/databases/$DATABASE_ID/migrate``. 
The body of the request must specify a - ``region`` attribute. + For MySQL clusters, additional options will be contained in the mysql_settings object. - A successful request will receive a 202 Accepted status code with no body in - response. Querying the database cluster will show that its ``status`` attribute - will now be set to ``migrating``. This will transition back to ``online`` when the - migration has completed. + For MongoDB clusters, additional information will be contained in the mongo_user_settings + object. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 404 + # response body for status code(s): 200 response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace - def update_region( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Migrate a Database Cluster to a New Region. 
- - To migrate a database cluster to a new region, send a ``PUT`` request to - ``/v2/databases/$DATABASE_ID/migrate``. The body of the request must specify a - ``region`` attribute. - - A successful request will receive a 202 Accepted status code with no body in - response. Querying the database cluster will show that its ``status`` attribute - will now be set to ``migrating``. This will transition back to ``online`` when the - migration has completed. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "region": "str" # A slug identifier for the region to which the database - cluster will be migrated. Required. + "users": [ + { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS + client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL + 8.0, the default is ``caching_sha2_password``. Required. Known values + are: "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password + for the database user.:code:`
`Requires ``database:view_credentials`` + scope. + "role": "str", # Optional. A string representing the + database user's role. The value will be either "primary" or "normal". + Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission + set applied to the ACL. 'consume' allows for messages to be + consumed from the topic. 'produce' allows for messages to be + published to the topic. 'produceconsume' allows for both + 'consume' and 'produce' permission. 'admin' allows for + 'produceconsume' as well as any operations to administer the + topic (delete, update). Required. Known values are: "admin", + "consume", "produce", and "produceconsume". + "topic": "str", # A regex for + matching the topic(s) that this ACL should apply to. + Required. + "id": "str" # Optional. An + identifier for the ACL. Will be computed after the ACL is + created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of + databases to which the user should have access. When the + database is set to ``admin``"" , the user will have access to + all databases based on the user's role i.e. a user with the + role ``readOnly`` assigned to the ``admin`` database will + have read access to all databases. + ], + "role": "str" # Optional. The role to assign + to the user with each role mapping to a MongoDB built-in role. + ``readOnly`` maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex + for matching the indexes that this ACL should apply to. + "permission": "str" # Optional. + Permission set applied to the ACL. 'read' allows user to read + from the index. 'write' allows for user to write to the + index. 'readwrite' allows for both 'read' and 'write' + permission. 
'deny'(default) restricts user from performing + any operation over an index. 'admin' allows for 'readwrite' + as well as any operations to administer the index. Known + values are: "deny", "admin", "read", "readwrite", and + "write". + } + ], + "pg_allow_replication": bool # Optional. For + Postgres clusters, set to ``true`` for a user with replication + rights. This option is not currently supported for other database + engines. + } + } + ] } - # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -121907,27 +130753,13 @@ def update_region( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_update_region_request( + _request = build_databases_list_users_request( database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, headers=_headers, params=_params, ) @@ -121942,15 +130774,14 @@ def update_region( response = pipeline_response.http_response - if response.status_code not in [202, 404]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 202: + if response.status_code == 200: response_headers["ratelimit-limit"] = 
self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -121961,6 +130792,11 @@ def update_region( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -121978,27 +130814,39 @@ def update_region( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @overload - def update_cluster_size( + def add_user( self, database_cluster_uuid: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any, - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Resize a Database Cluster. + """Add a Database User. - To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The - body of the request must specify both the size and num_nodes attributes. - A successful request will receive a 202 Accepted status code with no body in response. Querying - the database cluster will show that its status attribute will now be set to resizing. This will - transition back to online when the resize operation has completed. + To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users`` + with the desired username. + + Note: User management is not supported for Caching or Valkey clusters. + + When adding a user to a MySQL cluster, additional options can be configured in the + ``mysql_settings`` object. + + When adding a user to a Kafka cluster, additional options can be configured in + the ``settings`` object. + + When adding a user to a MongoDB cluster, additional options can be configured in + the ``settings.mongo_user_settings`` object. 
+ + The response will be a JSON object with a key called ``user``. The value of this will be an + object that contains the standard attributes associated with a database user including + its randomly generated password. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -122007,8 +130855,8 @@ def update_cluster_size( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -122016,17 +130864,158 @@ def update_cluster_size( # JSON input template you can fill out and use as your body input. body = { - "num_nodes": 0, # The number of nodes in the database cluster. Valid values - are are 1-3. In addition to the primary node, up to two standby nodes may be - added for highly available configurations. Required. - "size": "str", # A slug identifier representing desired the size of the - nodes in the database cluster. Required. - "storage_size_mib": 0 # Optional. Additional storage added to the cluster, - in MiB. If null, no additional storage is added to the cluster, beyond what is - provided as a base amount from the 'size' and any previously added additional - storage. + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client authentication. + (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the authentication method + to be used for connections to the MySQL user account. The valid values are + ``mysql_native_password`` or ``caching_sha2_password``. If excluded when + creating a new user, the default for the version of MySQL in use will be + used. 
As of MySQL 8.0, the default is ``caching_sha2_password``. Required. + Known values are: "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "readonly": bool, # Optional. (To be deprecated: use + settings.mongo_user_settings.role instead for access controls to MongoDB + databases). For MongoDB clusters, set to ``true`` to create a read-only user. + This option is not currently supported for other database engines. + "role": "str", # Optional. A string representing the database user's role. + The value will be either "primary" or "normal". Known values are: "primary" and + "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set applied to the + ACL. 'consume' allows for messages to be consumed from the topic. + 'produce' allows for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' permission. + 'admin' allows for 'produceconsume' as well as any operations to + administer the topic (delete, update). Required. Known values are: + "admin", "consume", "produce", and "produceconsume". + "topic": "str", # A regex for matching the topic(s) + that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for the ACL. + Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to which the + user should have access. When the database is set to ``admin``"" , + the user will have access to all databases based on the user's role + i.e. a user with the role ``readOnly`` assigned to the ``admin`` + database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the user + with each role mapping to a MongoDB built-in role. ``readOnly`` maps to + a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for matching the + indexes that this ACL should apply to. 
+ "permission": "str" # Optional. Permission set + applied to the ACL. 'read' allows user to read from the index. + 'write' allows for user to write to the index. 'readwrite' allows for + both 'read' and 'write' permission. 'deny'(default) restricts user + from performing any operation over an index. 'admin' allows for + 'readwrite' as well as any operations to administer the index. Known + values are: "deny", "admin", "read", "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres clusters, set + to ``true`` for a user with replication rights. This option is not currently + supported for other database engines. + } } + # response body for status code(s): 201 + response == { + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. + } + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -122041,22 +131030,34 @@ def update_cluster_size( """ @overload - def update_cluster_size( + def add_user( self, database_cluster_uuid: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any, - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Resize a Database Cluster. + """Add a Database User. - To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The - body of the request must specify both the size and num_nodes attributes. - A successful request will receive a 202 Accepted status code with no body in response. Querying - the database cluster will show that its status attribute will now be set to resizing. This will - transition back to online when the resize operation has completed. + To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users`` + with the desired username. + + Note: User management is not supported for Caching or Valkey clusters. + + When adding a user to a MySQL cluster, additional options can be configured in the + ``mysql_settings`` object. + + When adding a user to a Kafka cluster, additional options can be configured in + the ``settings`` object. + + When adding a user to a MongoDB cluster, additional options can be configured in + the ``settings.mongo_user_settings`` object. + + The response will be a JSON object with a key called ``user``. 
The value of this will be an + object that contains the standard attributes associated with a database user including + its randomly generated password. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -122065,13 +131066,90 @@ def update_cluster_size( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 201 + response == { + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. + } + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -122086,24 +131164,36 @@ def update_cluster_size( """ @distributed_trace - def update_cluster_size( + def add_user( self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Resize a Database Cluster. + """Add a Database User. - To resize a database cluster, send a PUT request to ``/v2/databases/$DATABASE_ID/resize``. The - body of the request must specify both the size and num_nodes attributes. - A successful request will receive a 202 Accepted status code with no body in response. Querying - the database cluster will show that its status attribute will now be set to resizing. This will - transition back to online when the resize operation has completed. + To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users`` + with the desired username. + + Note: User management is not supported for Caching or Valkey clusters. + + When adding a user to a MySQL cluster, additional options can be configured in the + ``mysql_settings`` object. + + When adding a user to a Kafka cluster, additional options can be configured in + the ``settings`` object. + + When adding a user to a MongoDB cluster, additional options can be configured in + the ``settings.mongo_user_settings`` object. + + The response will be a JSON object with a key called ``user``. 
The value of this will be an + object that contains the standard attributes associated with a database user including + its randomly generated password. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -122111,17 +131201,158 @@ def update_cluster_size( # JSON input template you can fill out and use as your body input. body = { - "num_nodes": 0, # The number of nodes in the database cluster. Valid values - are are 1-3. In addition to the primary node, up to two standby nodes may be - added for highly available configurations. Required. - "size": "str", # A slug identifier representing desired the size of the - nodes in the database cluster. Required. - "storage_size_mib": 0 # Optional. Additional storage added to the cluster, - in MiB. If null, no additional storage is added to the cluster, beyond what is - provided as a base amount from the 'size' and any previously added additional - storage. + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client authentication. + (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the authentication method + to be used for connections to the MySQL user account. The valid values are + ``mysql_native_password`` or ``caching_sha2_password``. If excluded when + creating a new user, the default for the version of MySQL in use will be + used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required. + Known values are: "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. 
A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "readonly": bool, # Optional. (To be deprecated: use + settings.mongo_user_settings.role instead for access controls to MongoDB + databases). For MongoDB clusters, set to ``true`` to create a read-only user. + This option is not currently supported for other database engines. + "role": "str", # Optional. A string representing the database user's role. + The value will be either "primary" or "normal". Known values are: "primary" and + "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set applied to the + ACL. 'consume' allows for messages to be consumed from the topic. + 'produce' allows for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' permission. + 'admin' allows for 'produceconsume' as well as any operations to + administer the topic (delete, update). Required. Known values are: + "admin", "consume", "produce", and "produceconsume". + "topic": "str", # A regex for matching the topic(s) + that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for the ACL. + Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to which the + user should have access. When the database is set to ``admin``"" , + the user will have access to all databases based on the user's role + i.e. a user with the role ``readOnly`` assigned to the ``admin`` + database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the user + with each role mapping to a MongoDB built-in role. ``readOnly`` maps to + a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for matching the + indexes that this ACL should apply to. 
+ "permission": "str" # Optional. Permission set + applied to the ACL. 'read' allows user to read from the index. + 'write' allows for user to write to the index. 'readwrite' allows for + both 'read' and 'write' permission. 'deny'(default) restricts user + from performing any operation over an index. 'admin' allows for + 'readwrite' as well as any operations to administer the index. Known + values are: "deny", "admin", "read", "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres clusters, set + to ``true`` for a user with replication rights. This option is not currently + supported for other database engines. + } } + # response body for status code(s): 201 + response == { + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. + } + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -122153,7 +131384,7 @@ def update_cluster_size( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -122163,7 +131394,7 @@ def update_cluster_size( else: _json = body - _request = build_databases_update_cluster_size_request( + _request = build_databases_add_user_request( database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, @@ -122182,15 +131413,14 @@ def update_cluster_size( response = pipeline_response.http_response - if response.status_code not in [202, 404]: + if response.status_code not in [201, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 202: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -122201,6 +131431,11 @@ def update_cluster_size( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: 
response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -122218,21 +131453,38 @@ def update_cluster_size( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace - def list_firewall_rules(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + def get_user( + self, database_cluster_uuid: str, username: str, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """List Firewall Rules (Trusted Sources) for a Database Cluster. + """Retrieve an Existing Database User. - To list all of a database cluster's firewall rules (known as "trusted sources" in the control - panel), send a GET request to ``/v2/databases/$DATABASE_ID/firewall``. - The result will be a JSON object with a ``rules`` key. + To show information about an existing database user, send a GET request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME``. + + Note: User management is not supported for Caching or Valkey clusters. + + The response will be a JSON object with a ``user`` key. This will be set to an object + containing the standard database user attributes. The user's password will not show + up unless the ``database:view_credentials`` scope is present. + + For MySQL clusters, additional options will be contained in the ``mysql_settings`` + object. + + For Kafka clusters, additional options will be contained in the ``settings`` object. + + For MongoDB clusters, additional information will be contained in the mongo_user_settings + object. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param username: The name of the database user. Required. 
+ :type username: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -122242,25 +131494,80 @@ def list_firewall_rules(self, database_cluster_uuid: str, **kwargs: Any) -> JSON # response body for status code(s): 200 response == { - "rules": [ - { - "type": "str", # The type of resource that the firewall rule - allows to access the database cluster. Required. Known values are: - "droplet", "k8s", "ip_addr", "tag", and "app". - "value": "str", # The ID of the specific resource, the name - of a tag applied to a group of resources, or the IP address that the - firewall rule allows to access the database cluster. Required. - "cluster_uuid": "str", # Optional. A unique ID for the - database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the firewall rule was created. - "description": "str", # Optional. A human-readable - description of the rule. - "uuid": "str" # Optional. A unique ID for the firewall rule - itself. + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. } - ] + } } # response body for status code(s): 404 response == { @@ -122292,8 +131599,9 @@ def list_firewall_rules(self, database_cluster_uuid: str, **kwargs: Any) -> JSON cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_firewall_rules_request( + _request = build_databases_get_user_request( database_cluster_uuid=database_cluster_uuid, + username=username, headers=_headers, params=_params, ) @@ -122352,148 +131660,25 @@ def list_firewall_rules(self, database_cluster_uuid: str, **kwargs: Any) -> JSON return cast(JSON, deserialized) # type: ignore - @overload - def update_firewall_rules( - self, - database_cluster_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Update Firewall Rules (Trusted Sources) for a Database. - - To update a database cluster's firewall rules (known as "trusted sources" in the control - panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which - resources should be able to open connections to the database. You may limit connections to - specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or - Kubernetes node with that tag applied to it will have access. The firewall is limited to 100 - rules (or trusted sources). When possible, we recommend `placing your databases into a VPC - network `_ to limit access to them - instead of using a firewall. - A successful. 
- - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "rules": [ - { - "type": "str", # The type of resource that the firewall rule - allows to access the database cluster. Required. Known values are: - "droplet", "k8s", "ip_addr", "tag", and "app". - "value": "str", # The ID of the specific resource, the name - of a tag applied to a group of resources, or the IP address that the - firewall rule allows to access the database cluster. Required. - "cluster_uuid": "str", # Optional. A unique ID for the - database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the firewall rule was created. - "description": "str", # Optional. A human-readable - description of the rule. - "uuid": "str" # Optional. A unique ID for the firewall rule - itself. - } - ] - } - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ - - @overload - def update_firewall_rules( - self, - database_cluster_uuid: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any, + @distributed_trace + def delete_user( + self, database_cluster_uuid: str, username: str, **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Update Firewall Rules (Trusted Sources) for a Database. - - To update a database cluster's firewall rules (known as "trusted sources" in the control - panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which - resources should be able to open connections to the database. You may limit connections to - specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or - Kubernetes node with that tag applied to it will have access. The firewall is limited to 100 - rules (or trusted sources). When possible, we recommend `placing your databases into a VPC - network `_ to limit access to them - instead of using a firewall. - A successful. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python + """Remove a Database User. - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. 
Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ + To remove a specific database user, send a DELETE request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME``. - @distributed_trace - def update_firewall_rules( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Update Firewall Rules (Trusted Sources) for a Database. + A status of 204 will be given. This indicates that the request was processed + successfully, but that no response body is needed. - To update a database cluster's firewall rules (known as "trusted sources" in the control - panel), send a PUT request to ``/v2/databases/$DATABASE_ID/firewall`` specifying which - resources should be able to open connections to the database. You may limit connections to - specific Droplets, Kubernetes clusters, or IP addresses. When a tag is provided, any Droplet or - Kubernetes node with that tag applied to it will have access. The firewall is limited to 100 - rules (or trusted sources). When possible, we recommend `placing your databases into a VPC - network `_ to limit access to them - instead of using a firewall. - A successful. + Note: User management is not supported for Caching or Valkey clusters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] + :param username: The name of the database user. Required. + :type username: str :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -122501,29 +131686,6 @@ def update_firewall_rules( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. 
- body = { - "rules": [ - { - "type": "str", # The type of resource that the firewall rule - allows to access the database cluster. Required. Known values are: - "droplet", "k8s", "ip_addr", "tag", and "app". - "value": "str", # The ID of the specific resource, the name - of a tag applied to a group of resources, or the IP address that the - firewall rule allows to access the database cluster. Required. - "cluster_uuid": "str", # Optional. A unique ID for the - database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the firewall rule was created. - "description": "str", # Optional. A human-readable - description of the rule. - "uuid": "str" # Optional. A unique ID for the firewall rule - itself. - } - ] - } - # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -122549,27 +131711,14 @@ def update_firewall_rules( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body - - _request = build_databases_update_firewall_rules_request( + _request = build_databases_delete_user_request( database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, + username=username, headers=_headers, params=_params, ) @@ -122625,30 +131774,42 @@ def update_firewall_rules( return deserialized # type: ignore @overload - def update_maintenance_window( + def update_user( self, 
database_cluster_uuid: str, + username: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any, - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Configure a Database Cluster's Maintenance Window. + """Update a Database User. - To configure the window when automatic maintenance should be performed for a database cluster, - send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``. - A successful request will receive a 204 No Content status code with no body in response. + To update an existing database user, send a PUT request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME`` + with the desired settings. + + **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change + the name of a user, + you must recreate a new user. + + The response will be a JSON object with a key called ``user``. The value of this will be an + object that contains the name of the update database user, along with the ``settings`` object + that + has been updated. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param username: The name of the database user. Required. + :type username: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -122656,18 +131817,136 @@ def update_maintenance_window( # JSON input template you can fill out and use as your body input. body = { - "day": "str", # The day of the week on which to apply maintenance updates. - Required. - "hour": "str", # The hour in UTC at which maintenance updates will be - applied in 24 hour format. Required. - "description": [ - "str" # Optional. 
A list of strings, each containing information - about a pending maintenance update. - ], - "pending": bool # Optional. A boolean value indicating whether any - maintenance is scheduled to be performed in the next window. + "settings": { + "acl": [ + { + "permission": "str", # Permission set applied to the + ACL. 'consume' allows for messages to be consumed from the topic. + 'produce' allows for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' permission. + 'admin' allows for 'produceconsume' as well as any operations to + administer the topic (delete, update). Required. Known values are: + "admin", "consume", "produce", and "produceconsume". + "topic": "str", # A regex for matching the topic(s) + that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for the ACL. + Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to which the + user should have access. When the database is set to ``admin``"" , + the user will have access to all databases based on the user's role + i.e. a user with the role ``readOnly`` assigned to the ``admin`` + database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the user + with each role mapping to a MongoDB built-in role. ``readOnly`` maps to + a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for matching the + indexes that this ACL should apply to. + "permission": "str" # Optional. Permission set + applied to the ACL. 'read' allows user to read from the index. + 'write' allows for user to write to the index. 'readwrite' allows for + both 'read' and 'write' permission. 
'deny'(default) restricts user + from performing any operation over an index. 'admin' allows for + 'readwrite' as well as any operations to administer the index. Known + values are: "deny", "admin", "read", "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres clusters, set + to ``true`` for a user with replication rights. This option is not currently + supported for other database engines. + } } + # response body for status code(s): 201 + response == { + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. + } + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -122682,334 +131961,122 @@ def update_maintenance_window( """ @overload - def update_maintenance_window( + def update_user( self, database_cluster_uuid: str, + username: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any, - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Configure a Database Cluster's Maintenance Window. + """Update a Database User. - To configure the window when automatic maintenance should be performed for a database cluster, - send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``. - A successful request will receive a 204 No Content status code with no body in response. + To update an existing database user, send a PUT request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME`` + with the desired settings. + + **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change + the name of a user, + you must recreate a new user. + + The response will be a JSON object with a key called ``user``. The value of this will be an + object that contains the name of the update database user, along with the ``settings`` object + that + has been updated. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param username: The name of the database user. Required. + :type username: str :param body: Required. 
:type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace - def update_maintenance_window( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Configure a Database Cluster's Maintenance Window. - - To configure the window when automatic maintenance should be performed for a database cluster, - send a PUT request to ``/v2/databases/$DATABASE_ID/maintenance``. - A successful request will receive a 204 No Content status code with no body in response. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "day": "str", # The day of the week on which to apply maintenance updates. - Required. 
- "hour": "str", # The hour in UTC at which maintenance updates will be - applied in 24 hour format. Required. - "description": [ - "str" # Optional. A list of strings, each containing information - about a pending maintenance update. - ], - "pending": bool # Optional. A boolean value indicating whether any - maintenance is scheduled to be performed in the next window. - } - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body - - _request = build_databases_update_maintenance_window_request( - database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - 
_request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - deserialized = None - response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def install_update( - self, database_cluster_uuid: str, **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Start Database Maintenance. - - To start the installation of updates for a database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/install_update``. - A successful request will receive a 204 No Content status code with no body in response. 
- - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - - _request = build_databases_install_update_request( - database_cluster_uuid=database_cluster_uuid, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, 
error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - deserialized = None - response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_backups(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: - # pylint: disable=line-too-long - """List Backups for a Database Cluster. - - To list all of the available backups of a PostgreSQL or MySQL database cluster, send a GET - request to ``/v2/databases/$DATABASE_ID/backups``. - **Note**\\ : Backups are not supported for Caching or Valkey clusters. - The result will be a JSON object with a ``backups key``. This will be set to an array of backup - objects, each of which will contain the size of the backup and the timestamp at which it was - created. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: - Example: - .. 
code-block:: python - - # response body for status code(s): 200 - response == { - "backups": [ - { - "created_at": "2020-02-20 00:00:00", # A time value given in - ISO8601 combined date and time format at which the backup was created. - Required. - "size_gigabytes": 0.0, # The size of the database backup in - GBs. Required. - "incremental": bool # Optional. Indicates if this backup is - a full or an incremental one (available only for MySQL). + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. } - ], - "backup_progress": "str", # Optional. If a backup is currently in progress, - this attribute shows the percentage of completion. If no backup is in progress, - this attribute will be hidden. - "scheduled_backup_time": { - "backup_hour": 0, # Optional. The hour of the day when the backup is - scheduled (in UTC). - "backup_interval_hours": 0, # Optional. The frequency, in hours, at - which backups are taken. - "backup_minute": 0 # Optional. The minute of the hour when the - backup is scheduled. } } # response body for status code(s): 404 @@ -123024,99 +132091,37 @@ def list_backups(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: tickets to help identify the issue. 
} """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_databases_list_backups_request( - database_cluster_uuid=database_cluster_uuid, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - 
response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore @distributed_trace - def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + def update_user( + self, + database_cluster_uuid: str, + username: str, + body: Union[JSON, IO[bytes]], + **kwargs: Any, + ) -> JSON: # pylint: disable=line-too-long - """List All Read-only Replicas. + """Update a Database User. - To list all of the read-only replicas associated with a database cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/replicas``. + To update an existing database user, send a PUT request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME`` + with the desired settings. - **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change + the name of a user, + you must recreate a new user. - The result will be a JSON object with a ``replicas`` key. This will be set to an array of - database replica objects, each of which will contain the standard database replica attributes. + The response will be a JSON object with a key called ``user``. The value of this will be an + object that contains the name of the update database user, along with the ``settings`` object + that + has been updated. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param username: The name of the database user. Required. + :type username: str + :param body: Is either a JSON type or a IO[bytes] type. Required. 
+ :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -123124,93 +132129,137 @@ def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: Example: .. code-block:: python - # response body for status code(s): 200 + # JSON input template you can fill out and use as your body input. + body = { + "settings": { + "acl": [ + { + "permission": "str", # Permission set applied to the + ACL. 'consume' allows for messages to be consumed from the topic. + 'produce' allows for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' permission. + 'admin' allows for 'produceconsume' as well as any operations to + administer the topic (delete, update). Required. Known values are: + "admin", "consume", "produce", and "produceconsume". + "topic": "str", # A regex for matching the topic(s) + that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for the ACL. + Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to which the + user should have access. When the database is set to ``admin``"" , + the user will have access to all databases based on the user's role + i.e. a user with the role ``readOnly`` assigned to the ``admin`` + database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the user + with each role mapping to a MongoDB built-in role. ``readOnly`` maps to + a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for matching the + indexes that this ACL should apply to. + "permission": "str" # Optional. Permission set + applied to the ACL. 'read' allows user to read from the index. 
+ 'write' allows for user to write to the index. 'readwrite' allows for + both 'read' and 'write' permission. 'deny'(default) restricts user + from performing any operation over an index. 'admin' allows for + 'readwrite' as well as any operations to administer the index. Known + values are: "deny", "admin", "read", "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres clusters, set + to ``true`` for a user with replication rights. This option is not currently + supported for other database engines. + } + } + + # response body for status code(s): 201 response == { - "replicas": [ - { - "name": "str", # The name to give the read-only replicating. - Required. - "connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the database cluster was created. - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs - for the database cluster. Each CNAME must be a valid RFC 1123 - hostname (e.g., "db.example.com"). Maximum of 16 CNAMEs allowed, - each up to 253 characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to - identify and reference a database replica. - "private_connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". }, - "private_network_uuid": "str", # Optional. A string - specifying the UUID of the VPC to which the read-only replica will be - assigned. If excluded, the replica will be assigned to your account's - default VPC for the region. :code:`
`:code:`
`Requires ``vpc:read`` - scope. - "region": "str", # Optional. A slug identifier for the - region where the read-only replica will be located. If excluded, the - replica will be placed in the same region as the cluster. - "size": "str", # Optional. A slug identifier representing - the size of the node for the read-only replica. The size of the replica - must be at least as large as the node size for the database cluster from - which it is replicating. - "status": "str", # Optional. A string representing the - current status of the database cluster. Known values are: "creating", - "online", "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added - to the cluster, in MiB. If null, no additional storage is added to the - cluster, beyond what is provided as a base amount from the 'size' and any - previously added additional storage. - "tags": [ - "str" # Optional. A flat array of tag names as - strings applied to the read-only - replica.:code:`
`:code:`
`Requires ``tag:read`` scope. - ] + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. } - ] + } } # response body for status code(s): 404 response == { @@ -123237,13 +132286,28 @@ def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_replicas_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_update_user_request( database_cluster_uuid=database_cluster_uuid, + username=username, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -123258,14 +132322,14 @@ def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in 
[201, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -123303,31 +132367,33 @@ def list_replicas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @overload - def create_replica( + def reset_auth( self, database_cluster_uuid: str, - body: Optional[JSON] = None, + username: str, + body: JSON, *, content_type: str = "application/json", **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Create a Read-only Replica. + """Reset a Database User's Password or Authentication Method. - To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request - to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of - the node to be used, and the region where it will be located. + To reset the password for a database user, send a POST request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``. - **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + For ``mysql`` databases, the authentication method can be specifying by + including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin`` + value specified. - The response will be a JSON object with a key called ``replica``. The value of this will be an - object that contains the standard attributes associated with a database replica. The initial - value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is - ready to receive traffic, this will transition to ``active``. + The response will be a JSON object with a ``user`` key. 
This will be set to an + object containing the standard database user attributes. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Default value is None. + :param username: The name of the database user. Required. + :type username: str + :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -123341,159 +132407,91 @@ def create_replica( # JSON input template you can fill out and use as your body input. body = { - "name": "str", # The name to give the read-only replicating. Required. - "connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given in - ISO8601 combined date and time format that represents when the database cluster - was created. - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the database - cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify and - reference a database replica. - "private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_network_uuid": "str", # Optional. A string specifying the UUID of - the VPC to which the read-only replica will be assigned. If excluded, the replica - will be assigned to your account's default VPC for the region. - :code:`
`:code:`
`Requires ``vpc:read`` scope. - "region": "str", # Optional. A slug identifier for the region where the - read-only replica will be located. If excluded, the replica will be placed in the - same region as the cluster. - "size": "str", # Optional. A slug identifier representing the size of the - node for the read-only replica. The size of the replica must be at least as large - as the node size for the database cluster from which it is replicating. - "status": "str", # Optional. A string representing the current status of the - database cluster. Known values are: "creating", "online", "resizing", - "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the cluster, - in MiB. If null, no additional storage is added to the cluster, beyond what is - provided as a base amount from the 'size' and any previously added additional - storage. - "tags": [ - "str" # Optional. A flat array of tag names as strings to apply to - the read-only replica after it is created. Tag names can either be existing - or new tags. :code:`
`:code:`
`Requires ``tag:create`` scope. - ] + "mysql_settings": { + "auth_plugin": "str" # A string specifying the authentication method + to be used for connections to the MySQL user account. The valid values are + ``mysql_native_password`` or ``caching_sha2_password``. If excluded when + creating a new user, the default for the version of MySQL in use will be + used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required. + Known values are: "mysql_native_password" and "caching_sha2_password". + } } - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "replica": { - "name": "str", # The name to give the read-only replicating. - Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the - database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database replica. - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the read-only replica will be assigned. If excluded, - the replica will be assigned to your account's default VPC for the region. - :code:`
`:code:`
`Requires ``vpc:read`` scope. - "region": "str", # Optional. A slug identifier for the region where - the read-only replica will be located. If excluded, the replica will be - placed in the same region as the cluster. - "size": "str", # Optional. A slug identifier representing the size - of the node for the read-only replica. The size of the replica must be at - least as large as the node size for the database cluster from which it is - replicating. - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the - cluster, in MiB. If null, no additional storage is added to the cluster, - beyond what is provided as a base amount from the 'size' and any previously - added additional storage. - "tags": [ - "str" # Optional. A flat array of tag names as strings - applied to the read-only replica.:code:`
`:code:`
`Requires - ``tag:read`` scope. - ] + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. + } } } # response body for status code(s): 404 @@ -123510,31 +132508,33 @@ def create_replica( """ @overload - def create_replica( + def reset_auth( self, database_cluster_uuid: str, - body: Optional[IO[bytes]] = None, + username: str, + body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Create a Read-only Replica. + """Reset a Database User's Password or Authentication Method. - To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request - to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of - the node to be used, and the region where it will be located. + To reset the password for a database user, send a POST request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``. - **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + For ``mysql`` databases, the authentication method can be specifying by + including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin`` + value specified. - The response will be a JSON object with a key called ``replica``. The value of this will be an - object that contains the standard attributes associated with a database replica. The initial - value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is - ready to receive traffic, this will transition to ``active``. + The response will be a JSON object with a ``user`` key. 
This will be set to an + object containing the standard database user attributes. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Default value is None. + :param username: The name of the database user. Required. + :type username: str + :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". @@ -123546,85 +132546,81 @@ def create_replica( Example: .. code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "replica": { - "name": "str", # The name to give the read-only replicating. - Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the - database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database replica. - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the read-only replica will be assigned. If excluded, - the replica will be assigned to your account's default VPC for the region. - :code:`
`:code:`
`Requires ``vpc:read`` scope. - "region": "str", # Optional. A slug identifier for the region where - the read-only replica will be located. If excluded, the replica will be - placed in the same region as the cluster. - "size": "str", # Optional. A slug identifier representing the size - of the node for the read-only replica. The size of the replica must be at - least as large as the node size for the database cluster from which it is - replicating. - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the - cluster, in MiB. If null, no additional storage is added to the cluster, - beyond what is provided as a base amount from the 'size' and any previously - added additional storage. - "tags": [ - "str" # Optional. A flat array of tag names as strings - applied to the read-only replica.:code:`
`:code:`
`Requires - ``tag:read`` scope. - ] + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. + } } } # response body for status code(s): 404 @@ -123641,192 +132637,126 @@ def create_replica( """ @distributed_trace - def create_replica( + def reset_auth( self, database_cluster_uuid: str, - body: Optional[Union[JSON, IO[bytes]]] = None, - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Create a Read-only Replica. - - To create a read-only replica for a PostgreSQL or MySQL database cluster, send a POST request - to ``/v2/databases/$DATABASE_ID/replicas`` specifying the name it should be given, the size of - the node to be used, and the region where it will be located. - - **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. - - The response will be a JSON object with a key called ``replica``. The value of this will be an - object that contains the standard attributes associated with a database replica. The initial - value of the read-only replica's ``status`` attribute will be ``forking``. When the replica is - ready to receive traffic, this will transition to ``active``. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. - :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "name": "str", # The name to give the read-only replicating. Required. 
- "connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given in - ISO8601 combined date and time format that represents when the database cluster - was created. - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the database - cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify and - reference a database replica. - "private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_network_uuid": "str", # Optional. A string specifying the UUID of - the VPC to which the read-only replica will be assigned. If excluded, the replica - will be assigned to your account's default VPC for the region. - :code:`
`:code:`
`Requires ``vpc:read`` scope. - "region": "str", # Optional. A slug identifier for the region where the - read-only replica will be located. If excluded, the replica will be placed in the - same region as the cluster. - "size": "str", # Optional. A slug identifier representing the size of the - node for the read-only replica. The size of the replica must be at least as large - as the node size for the database cluster from which it is replicating. - "status": "str", # Optional. A string representing the current status of the - database cluster. Known values are: "creating", "online", "resizing", - "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the cluster, - in MiB. If null, no additional storage is added to the cluster, beyond what is - provided as a base amount from the 'size' and any previously added additional - storage. - "tags": [ - "str" # Optional. A flat array of tag names as strings to apply to - the read-only replica after it is created. Tag names can either be existing - or new tags. :code:`
`:code:`
`Requires ``tag:create`` scope. - ] - } - - # response body for status code(s): 201 - response == { - "replica": { - "name": "str", # The name to give the read-only replicating. - Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the - database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database replica. - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. + username: str, + body: Union[JSON, IO[bytes]], + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Reset a Database User's Password or Authentication Method. + + To reset the password for a database user, send a POST request to + ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``. + + For ``mysql`` databases, the authentication method can be specifying by + including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin`` + value specified. + + The response will be a JSON object with a ``user`` key. This will be set to an + object containing the standard database user attributes. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param username: The name of the database user. Required. + :type username: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "mysql_settings": { + "auth_plugin": "str" # A string specifying the authentication method + to be used for connections to the MySQL user account. The valid values are + ``mysql_native_password`` or ``caching_sha2_password``. If excluded when + creating a new user, the default for the version of MySQL in use will be + used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required. + Known values are: "mysql_native_password" and "caching_sha2_password". + } + } + + # response body for status code(s): 200 + response == { + "user": { + "name": "str", # The name of a database user. Required. + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). 
+ "mysql_settings": { + "auth_plugin": "str" # A string specifying the + authentication method to be used for connections to the MySQL user + account. The valid values are ``mysql_native_password`` or + ``caching_sha2_password``. If excluded when creating a new user, the + default for the version of MySQL in use will be used. As of MySQL 8.0, + the default is ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the read-only replica will be assigned. If excluded, - the replica will be assigned to your account's default VPC for the region. - :code:`
`:code:`
`Requires ``vpc:read`` scope. - "region": "str", # Optional. A slug identifier for the region where - the read-only replica will be located. If excluded, the replica will be - placed in the same region as the cluster. - "size": "str", # Optional. A slug identifier representing the size - of the node for the read-only replica. The size of the replica must be at - least as large as the node size for the database cluster from which it is - replicating. - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the - cluster, in MiB. If null, no additional storage is added to the cluster, - beyond what is provided as a base amount from the 'size' and any previously - added additional storage. - "tags": [ - "str" # Optional. A flat array of tag names as strings - applied to the read-only replica.:code:`
`:code:`
`Requires - ``tag:read`` scope. - ] + "password": "str", # Optional. A randomly generated password for the + database user.:code:`
`Requires ``database:view_credentials`` scope. + "role": "str", # Optional. A string representing the database user's + role. The value will be either "primary" or "normal". Known values are: + "primary" and "normal". + "settings": { + "acl": [ + { + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str", # A regex for matching the + topic(s) that this ACL should apply to. Required. + "id": "str" # Optional. An identifier for + the ACL. Will be computed after the ACL is created/updated. + } + ], + "mongo_user_settings": { + "databases": [ + "str" # Optional. A list of databases to + which the user should have access. When the database is set to + ``admin``"" , the user will have access to all databases based on + the user's role i.e. a user with the role ``readOnly`` assigned + to the ``admin`` database will have read access to all databases. + ], + "role": "str" # Optional. The role to assign to the + user with each role mapping to a MongoDB built-in role. ``readOnly`` + maps to a `read + `_ + role. ``readWrite`` maps to a `readWrite + `_ + role. ``dbAdmin`` maps to a `dbAdmin + `_ + role. Known values are: "readOnly", "readWrite", and "dbAdmin". + }, + "opensearch_acl": [ + { + "index": "str", # Optional. A regex for + matching the indexes that this ACL should apply to. + "permission": "str" # Optional. Permission + set applied to the ACL. 'read' allows user to read from the + index. 'write' allows for user to write to the index. 'readwrite' + allows for both 'read' and 'write' permission. 
'deny'(default) + restricts user from performing any operation over an index. + 'admin' allows for 'readwrite' as well as any operations to + administer the index. Known values are: "deny", "admin", "read", + "readwrite", and "write". + } + ], + "pg_allow_replication": bool # Optional. For Postgres + clusters, set to ``true`` for a user with replication rights. This option + is not currently supported for other database engines. + } } } # response body for status code(s): 404 @@ -123868,13 +132798,11 @@ def create_replica( if isinstance(body, (IOBase, bytes)): _content = body else: - if body is not None: - _json = body - else: - _json = None + _json = body - _request = build_databases_create_replica_request( + _request = build_databases_reset_auth_request( database_cluster_uuid=database_cluster_uuid, + username=username, content_type=content_type, json=_json, content=_content, @@ -123892,14 +132820,14 @@ def create_replica( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 201: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -123937,14 +132865,17 @@ def create_replica( return cast(JSON, deserialized) # type: ignore @distributed_trace - def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List all Events Logs. + """List All Databases. - To list all of the cluster events, send a GET request to - ``/v2/databases/$DATABASE_ID/events``. 
+ To list all of the databases in a clusters, send a GET request to + ``/v2/databases/$DATABASE_ID/dbs``. - The result will be a JSON object with a ``events`` key. + The result will be a JSON object with a ``dbs`` key. This will be set to an array + of database objects, each of which will contain the standard database attributes. + + Note: Database management is not supported for Caching or Valkey clusters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -123957,16 +132888,9 @@ def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "events": [ + "dbs": [ { - "cluster_name": "str", # Optional. The name of cluster. - "create_time": "str", # Optional. The time of the generation - of a event. - "event_type": "str", # Optional. Type of the event. Known - values are: "cluster_maintenance_perform", "cluster_master_promotion", - "cluster_create", "cluster_update", "cluster_delete", "cluster_poweron", - and "cluster_poweroff". - "id": "str" # Optional. ID of the particular event. + "name": "str" # The name of the database. Required. 
} ] } @@ -124000,7 +132924,7 @@ def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_events_logs_request( + _request = build_databases_list_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -124060,25 +132984,33 @@ def list_events_logs(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore - @distributed_trace - def get_replica( - self, database_cluster_uuid: str, replica_name: str, **kwargs: Any + @overload + def add( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Retrieve an Existing Read-only Replica. + """Add a New Database. - To show information about an existing database replica, send a GET request to - ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME``. + To add a new database to an existing cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/dbs``. - **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + Note: Database management is not supported for Caching or Valkey clusters. - The response will be a JSON object with a ``replica key``. This will be set to an object - containing the standard database replica attributes. + The response will be a JSON object with a key called ``db``. The value of this will be + an object that contains the standard attributes associated with a database. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param replica_name: The name of the database replica. Required. - :type replica_name: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -124086,85 +133018,15 @@ def get_replica( Example: .. code-block:: python - # response body for status code(s): 200 + # JSON input template you can fill out and use as your body input. + body = { + "name": "str" # The name of the database. Required. + } + + # response body for status code(s): 201 response == { - "replica": { - "name": "str", # The name to give the read-only replicating. - Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "do_settings": { - "service_cnames": [ - "str" # Optional. An array of custom CNAMEs for the - database cluster. Each CNAME must be a valid RFC 1123 hostname (e.g., - "db.example.com"). Maximum of 16 CNAMEs allowed, each up to 253 - characters. - ] - }, - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database replica. - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the read-only replica will be assigned. If excluded, - the replica will be assigned to your account's default VPC for the region. - :code:`
`:code:`
`Requires ``vpc:read`` scope. - "region": "str", # Optional. A slug identifier for the region where - the read-only replica will be located. If excluded, the replica will be - placed in the same region as the cluster. - "size": "str", # Optional. A slug identifier representing the size - of the node for the read-only replica. The size of the replica must be at - least as large as the node size for the database cluster from which it is - replicating. - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "storage_size_mib": 0, # Optional. Additional storage added to the - cluster, in MiB. If null, no additional storage is added to the cluster, - beyond what is provided as a base amount from the 'size' and any previously - added additional storage. - "tags": [ - "str" # Optional. A flat array of tag names as strings - applied to the read-only replica.:code:`
`:code:`
`Requires - ``tag:read`` scope. - ] + "db": { + "name": "str" # The name of the database. Required. } } # response body for status code(s): 404 @@ -124179,111 +133041,97 @@ def get_replica( tickets to help identify the issue. } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_databases_get_replica_request( - database_cluster_uuid=database_cluster_uuid, - replica_name=replica_name, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - if response.status_code not in [200, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) + @overload + def add( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Add a New Database. 
- response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + To add a new database to an existing cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/dbs``. - if response.content: - deserialized = response.json() - else: - deserialized = None + Note: Database management is not supported for Caching or Valkey clusters. - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + The response will be a JSON object with a key called ``db``. The value of this will be + an object that contains the standard attributes associated with a database. - if response.content: - deserialized = response.json() - else: - deserialized = None + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + Example: + .. 
code-block:: python - return cast(JSON, deserialized) # type: ignore + # response body for status code(s): 201 + response == { + "db": { + "name": "str" # The name of the database. Required. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ @distributed_trace - def destroy_replica( - self, database_cluster_uuid: str, replica_name: str, **kwargs: Any - ) -> Optional[JSON]: + def add( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Destroy a Read-only Replica. + """Add a New Database. - To destroy a specific read-only replica, send a DELETE request to - ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME``. + To add a new database to an existing cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/dbs``. - **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + Note: Database management is not supported for Caching or Valkey clusters. - A status of 204 will be given. This indicates that the request was processed successfully, but - that no response body is needed. + The response will be a JSON object with a key called ``db``. The value of this will be + an object that contains the standard attributes associated with a database. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param replica_name: The name of the database replica. Required. 
- :type replica_name: str - :return: JSON object or None - :rtype: JSON or None + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "name": "str" # The name of the database. Required. + } + + # response body for status code(s): 201 + response == { + "db": { + "name": "str" # The name of the database. Required. + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -124309,14 +133157,27 @@ def destroy_replica( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_destroy_replica_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_add_request( database_cluster_uuid=database_cluster_uuid, - replica_name=replica_name, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -124331,15 +133192,14 @@ def destroy_replica( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [201, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - 
deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -124350,6 +133210,11 @@ def destroy_replica( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -124367,36 +133232,42 @@ def destroy_replica( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace - def promote_replica( - self, database_cluster_uuid: str, replica_name: str, **kwargs: Any - ) -> Optional[JSON]: + def get( + self, database_cluster_uuid: str, database_name: str, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Promote a Read-only Replica to become a Primary Cluster. + """Retrieve an Existing Database. - To promote a specific read-only replica, send a PUT request to - ``/v2/databases/$DATABASE_ID/replicas/$REPLICA_NAME/promote``. + To show information about an existing database cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/dbs/$DB_NAME``. - **Note**\\ : Read-only replicas are not supported for Caching or Valkey clusters. + Note: Database management is not supported for Caching or Valkey clusters. - A status of 204 will be given. This indicates that the request was processed successfully, but - that no response body is needed. + The response will be a JSON object with a ``db`` key. This will be set to an object + containing the standard database attributes. :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
:type database_cluster_uuid: str - :param replica_name: The name of the database replica. Required. - :type replica_name: str - :return: JSON object or None - :rtype: JSON or None + :param database_name: The name of the database. Required. + :type database_name: str + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 200 + response == { + "db": { + "name": "str" # The name of the database. Required. + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -124425,11 +133296,11 @@ def promote_replica( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_promote_replica_request( + _request = build_databases_get_request( database_cluster_uuid=database_cluster_uuid, - replica_name=replica_name, + database_name=database_name, headers=_headers, params=_params, ) @@ -124444,15 +133315,14 @@ def promote_replica( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -124463,6 +133333,11 @@ def promote_replica( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = 
self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -124480,123 +133355,36 @@ def promote_replica( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace - def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + def delete( + self, database_cluster_uuid: str, database_name: str, **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """List all Database Users. - - To list all of the users for your database cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/users``. - - Note: User management is not supported for Caching or Valkey clusters. + """Delete a Database. - The result will be a JSON object with a ``users`` key. This will be set to an array - of database user objects, each of which will contain the standard database user attributes. - User passwords will not show without the ``database:view_credentials`` scope. + To delete a specific database, send a DELETE request to + ``/v2/databases/$DATABASE_ID/dbs/$DB_NAME``. - For MySQL clusters, additional options will be contained in the mysql_settings object. + A status of 204 will be given. This indicates that the request was processed + successfully, but that no response body is needed. - For MongoDB clusters, additional information will be contained in the mongo_user_settings - object. + Note: Database management is not supported for Caching or Valkey clusters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :return: JSON object - :rtype: JSON + :param database_name: The name of the database. Required. 
+ :type database_name: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "users": [ - { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS - client authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL - 8.0, the default is ``caching_sha2_password``. Required. Known values - are: "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password - for the database user.:code:`
`Requires ``database:view_credentials`` - scope. - "role": "str", # Optional. A string representing the - database user's role. The value will be either "primary" or "normal". - Known values are: "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission - set applied to the ACL. 'consume' allows for messages to be - consumed from the topic. 'produce' allows for messages to be - published to the topic. 'produceconsume' allows for both - 'consume' and 'produce' permission. 'admin' allows for - 'produceconsume' as well as any operations to administer the - topic (delete, update). Required. Known values are: "admin", - "consume", "produce", and "produceconsume". - "topic": "str", # A regex for - matching the topic(s) that this ACL should apply to. - Required. - "id": "str" # Optional. An - identifier for the ACL. Will be computed after the ACL is - created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of - databases to which the user should have access. When the - database is set to ``admin``"" , the user will have access to - all databases based on the user's role i.e. a user with the - role ``readOnly`` assigned to the ``admin`` database will - have read access to all databases. - ], - "role": "str" # Optional. The role to assign - to the user with each role mapping to a MongoDB built-in role. - ``readOnly`` maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex - for matching the indexes that this ACL should apply to. - "permission": "str" # Optional. - Permission set applied to the ACL. 'read' allows user to read - from the index. 'write' allows for user to write to the - index. 'readwrite' allows for both 'read' and 'write' - permission. 
'deny'(default) restricts user from performing - any operation over an index. 'admin' allows for 'readwrite' - as well as any operations to administer the index. Known - values are: "deny", "admin", "read", "readwrite", and - "write". - } - ], - "pg_allow_replication": bool # Optional. For - Postgres clusters, set to ``true`` for a user with replication - rights. This option is not currently supported for other database - engines. - } - } - ] - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -124625,10 +133413,11 @@ def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_list_users_request( + _request = build_databases_delete_request( database_cluster_uuid=database_cluster_uuid, + database_name=database_name, headers=_headers, params=_params, ) @@ -124643,14 +133432,15 @@ def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -124661,11 +133451,6 @@ def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if 
response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -124682,385 +133467,23 @@ def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: else: deserialized = None - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @overload - def add_user( - self, - database_cluster_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Add a Database User. - - To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users`` - with the desired username. - - Note: User management is not supported for Caching or Valkey clusters. - - When adding a user to a MySQL cluster, additional options can be configured in the - ``mysql_settings`` object. - - When adding a user to a Kafka cluster, additional options can be configured in - the ``settings`` object. - - When adding a user to a MongoDB cluster, additional options can be configured in - the ``settings.mongo_user_settings`` object. - - The response will be a JSON object with a key called ``user``. The value of this will be an - object that contains the standard attributes associated with a database user including - its randomly generated password. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "name": "str", # The name of a database user. 
Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client authentication. - (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the authentication method - to be used for connections to the MySQL user account. The valid values are - ``mysql_native_password`` or ``caching_sha2_password``. If excluded when - creating a new user, the default for the version of MySQL in use will be - used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required. - Known values are: "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "readonly": bool, # Optional. (To be deprecated: use - settings.mongo_user_settings.role instead for access controls to MongoDB - databases). For MongoDB clusters, set to ``true`` to create a read-only user. - This option is not currently supported for other database engines. - "role": "str", # Optional. A string representing the database user's role. - The value will be either "primary" or "normal". Known values are: "primary" and - "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set applied to the - ACL. 'consume' allows for messages to be consumed from the topic. - 'produce' allows for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' permission. - 'admin' allows for 'produceconsume' as well as any operations to - administer the topic (delete, update). Required. Known values are: - "admin", "consume", "produce", and "produceconsume". - "topic": "str", # A regex for matching the topic(s) - that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for the ACL. - Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to which the - user should have access. When the database is set to ``admin``"" , - the user will have access to all databases based on the user's role - i.e. a user with the role ``readOnly`` assigned to the ``admin`` - database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the user - with each role mapping to a MongoDB built-in role. ``readOnly`` maps to - a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for matching the - indexes that this ACL should apply to. 
- "permission": "str" # Optional. Permission set - applied to the ACL. 'read' allows user to read from the index. - 'write' allows for user to write to the index. 'readwrite' allows for - both 'read' and 'write' permission. 'deny'(default) restricts user - from performing any operation over an index. 'admin' allows for - 'readwrite' as well as any operations to administer the index. Known - values are: "deny", "admin", "read", "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres clusters, set - to ``true`` for a user with replication rights. This option is not currently - supported for other database engines. - } - } - - # response body for status code(s): 201 - response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - def add_user( - self, - database_cluster_uuid: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Add a Database User. - - To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users`` - with the desired username. - - Note: User management is not supported for Caching or Valkey clusters. - - When adding a user to a MySQL cluster, additional options can be configured in the - ``mysql_settings`` object. - - When adding a user to a Kafka cluster, additional options can be configured in - the ``settings`` object. - - When adding a user to a MongoDB cluster, additional options can be configured in - the ``settings.mongo_user_settings`` object. - - The response will be a JSON object with a key called ``user``. 
The value of this will be an - object that contains the standard attributes associated with a database user including - its randomly generated password. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 201 - response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace - def add_user( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - """Add a Database User. - - To add a new database user, send a POST request to ``/v2/databases/$DATABASE_ID/users`` - with the desired username. - - Note: User management is not supported for Caching or Valkey clusters. - - When adding a user to a MySQL cluster, additional options can be configured in the - ``mysql_settings`` object. + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore - When adding a user to a Kafka cluster, additional options can be configured in - the ``settings`` object. + return deserialized # type: ignore - When adding a user to a MongoDB cluster, additional options can be configured in - the ``settings.mongo_user_settings`` object. 
+ @distributed_trace + def list_connection_pools(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """List Connection Pools (PostgreSQL). - The response will be a JSON object with a key called ``user``. The value of this will be an - object that contains the standard attributes associated with a database user including - its randomly generated password. + To list all of the connection pools available to a PostgreSQL database cluster, send a GET + request to ``/v2/databases/$DATABASE_ID/pools``. + The result will be a JSON object with a ``pools`` key. This will be set to an array of + connection pool objects. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -125068,159 +133491,113 @@ def add_user( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client authentication. - (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the authentication method - to be used for connections to the MySQL user account. The valid values are - ``mysql_native_password`` or ``caching_sha2_password``. If excluded when - creating a new user, the default for the version of MySQL in use will be - used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required. - Known values are: "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "readonly": bool, # Optional. (To be deprecated: use - settings.mongo_user_settings.role instead for access controls to MongoDB - databases). For MongoDB clusters, set to ``true`` to create a read-only user. - This option is not currently supported for other database engines. - "role": "str", # Optional. A string representing the database user's role. - The value will be either "primary" or "normal". Known values are: "primary" and - "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set applied to the - ACL. 'consume' allows for messages to be consumed from the topic. - 'produce' allows for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' permission. - 'admin' allows for 'produceconsume' as well as any operations to - administer the topic (delete, update). Required. Known values are: - "admin", "consume", "produce", and "produceconsume". - "topic": "str", # A regex for matching the topic(s) - that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for the ACL. - Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to which the - user should have access. When the database is set to ``admin``"" , - the user will have access to all databases based on the user's role - i.e. a user with the role ``readOnly`` assigned to the ``admin`` - database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the user - with each role mapping to a MongoDB built-in role. ``readOnly`` maps to - a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for matching the - indexes that this ACL should apply to. 
- "permission": "str" # Optional. Permission set - applied to the ACL. 'read' allows user to read from the index. - 'write' allows for user to write to the index. 'readwrite' allows for - both 'read' and 'write' permission. 'deny'(default) restricts user - from performing any operation over an index. 'admin' allows for - 'readwrite' as well as any operations to administer the index. Known - values are: "deny", "admin", "read", "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres clusters, set - to ``true`` for a user with replication rights. This option is not currently - supported for other database engines. - } - } - - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". + "pools": [ + { + "db": "str", # The database for use with the connection + pool. Required. + "mode": "str", # The PGBouncer transaction mode for the + connection pool. The allowed values are session, transaction, and + statement. Required. + "name": "str", # A unique name for the connection pool. Must + be between 3 and 60 characters. Required. + "size": 0, # The desired size of the PGBouncer connection + pool. 
The maximum allowed size is determined by the size of the cluster's + primary node. 25 backend server connections are allowed for every 1GB of + RAM. Three are reserved for maintenance. For example, a primary node with + 1 GB of RAM allows for a maximum of 22 backend server connections while + one with 4 GB would allow for 97. Note that these are shared across all + connection pools in a cluster. Required. + "connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. + "private_connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "standby_connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the + default database. + "host": "str", # Optional. The FQDN pointing to the + database cluster's current primary node. + "password": "str", # Optional. The randomly + generated password for the default + user.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + "port": 0, # Optional. The port on which the + database cluster is listening. + "ssl": bool, # Optional. A boolean value indicating + if the connection should be made over SSL. + "uri": "str", # Optional. A connection string in the + format accepted by the ``psql`` command. This is provided as a + convenience and should be able to be constructed by the other + attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + }, + "user": "str" # Optional. The name of the user for use with + the connection pool. When excluded, all sessions connect to the database + as the inbound user. } - } + ] } # response body for status code(s): 404 response == { @@ -125247,27 +133624,13 @@ def add_user( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[JSON] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body - - _request = build_databases_add_user_request( + _request = build_databases_list_connection_pools_request( database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, headers=_headers, params=_params, ) @@ -125282,14 +133645,14 @@ def add_user( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 201: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -125326,34 +133689,274 @@ def add_user( return cast(JSON, deserialized) # type: ignore - @distributed_trace - def get_user( - self, database_cluster_uuid: str, username: str, **kwargs: Any + @overload + def add_connection_pool( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = 
"application/json", + **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Retrieve an Existing Database User. + """Add a New Connection Pool (PostgreSQL). - To show information about an existing database user, send a GET request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME``. + For PostgreSQL database clusters, connection pools can be used to allow a + database to share its idle connections. The popular PostgreSQL connection + pooling utility PgBouncer is used to provide this service. `See here for more information + `_ + about how and why to use PgBouncer connection pooling including + details about the available transaction modes. - Note: User management is not supported for Caching or Valkey clusters. + To add a new connection pool to a PostgreSQL database cluster, send a POST + request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool, + the user to connect with, the database to connect to, as well as its desired + size and transaction mode. - The response will be a JSON object with a ``user`` key. This will be set to an object - containing the standard database user attributes. The user's password will not show - up unless the ``database:view_credentials`` scope is present. + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: - For MySQL clusters, additional options will be contained in the ``mysql_settings`` - object. + Example: + .. code-block:: python - For Kafka clusters, additional options will be contained in the ``settings`` object. + # JSON input template you can fill out and use as your body input. 
+ body = { + "db": "str", # The database for use with the connection pool. Required. + "mode": "str", # The PGBouncer transaction mode for the connection pool. The + allowed values are session, transaction, and statement. Required. + "name": "str", # A unique name for the connection pool. Must be between 3 + and 60 characters. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The maximum + allowed size is determined by the size of the cluster's primary node. 25 backend + server connections are allowed for every 1GB of RAM. Three are reserved for + maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of + 22 backend server connections while one with 4 GB would allow for 97. Note that + these are shared across all connection pools in a cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "user": "str" # Optional. The name of the user for use with the connection + pool. When excluded, all sessions connect to the database as the inbound user. + } - For MongoDB clusters, additional information will be contained in the mongo_user_settings - object. + # response body for status code(s): 201 + response == { + "pool": { + "db": "str", # The database for use with the connection pool. + Required. + "mode": "str", # The PGBouncer transaction mode for the connection + pool. The allowed values are session, transaction, and statement. Required. + "name": "str", # A unique name for the connection pool. Must be + between 3 and 60 characters. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The + maximum allowed size is determined by the size of the cluster's primary node. + 25 backend server connections are allowed for every 1GB of RAM. Three are + reserved for maintenance. For example, a primary node with 1 GB of RAM allows + for a maximum of 22 backend server connections while one with 4 GB would + allow for 97. Note that these are shared across all connection pools in a + cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "user": "str" # Optional. The name of the user for use with the + connection pool. When excluded, all sessions connect to the database as the + inbound user. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def add_connection_pool( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Add a New Connection Pool (PostgreSQL). + + For PostgreSQL database clusters, connection pools can be used to allow a + database to share its idle connections. The popular PostgreSQL connection + pooling utility PgBouncer is used to provide this service. `See here for more information + `_ + about how and why to use PgBouncer connection pooling including + details about the available transaction modes. + + To add a new connection pool to a PostgreSQL database cluster, send a POST + request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool, + the user to connect with, the database to connect to, as well as its desired + size and transaction mode. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -125361,81 +133964,330 @@ def get_user( Example: .. code-block:: python - # response body for status code(s): 200 + # response body for status code(s): 201 response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". + "pool": { + "db": "str", # The database for use with the connection pool. + Required. + "mode": "str", # The PGBouncer transaction mode for the connection + pool. The allowed values are session, transaction, and statement. Required. + "name": "str", # A unique name for the connection pool. Must be + between 3 and 60 characters. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The + maximum allowed size is determined by the size of the cluster's primary node. + 25 backend server connections are allowed for every 1GB of RAM. Three are + reserved for maintenance. For example, a primary node with 1 GB of RAM allows + for a maximum of 22 backend server connections while one with 4 GB would + allow for 97. Note that these are shared across all connection pools in a + cluster. Required. + "connection": { + "database": "str", # Optional. 
The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "user": "str" # Optional. The name of the user for use with the + connection pool. When excluded, all sessions connect to the database as the + inbound user. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def add_connection_pool( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Add a New Connection Pool (PostgreSQL). + + For PostgreSQL database clusters, connection pools can be used to allow a + database to share its idle connections. The popular PostgreSQL connection + pooling utility PgBouncer is used to provide this service. `See here for more information + `_ + about how and why to use PgBouncer connection pooling including + details about the available transaction modes. + + To add a new connection pool to a PostgreSQL database cluster, send a POST + request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool, + the user to connect with, the database to connect to, as well as its desired + size and transaction mode. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "db": "str", # The database for use with the connection pool. Required. + "mode": "str", # The PGBouncer transaction mode for the connection pool. The + allowed values are session, transaction, and statement. Required. + "name": "str", # A unique name for the connection pool. Must be between 3 + and 60 characters. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The maximum + allowed size is determined by the size of the cluster's primary node. 25 backend + server connections are allowed for every 1GB of RAM. Three are reserved for + maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of + 22 backend server connections while one with 4 GB would allow for 97. Note that + these are shared across all connection pools in a cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated password for + the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database cluster is + listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format accepted + by the ``psql`` command. This is provided as a convenience and should be able + to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "user": "str" # Optional. The name of the user for use with the connection + pool. When excluded, all sessions connect to the database as the inbound user. + } + + # response body for status code(s): 201 + response == { + "pool": { + "db": "str", # The database for use with the connection pool. + Required. + "mode": "str", # The PGBouncer transaction mode for the connection + pool. The allowed values are session, transaction, and statement. Required. + "name": "str", # A unique name for the connection pool. Must be + between 3 and 60 characters. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The + maximum allowed size is determined by the size of the cluster's primary node. + 25 backend server connections are allowed for every 1GB of RAM. Three are + reserved for maintenance. For example, a primary node with 1 GB of RAM allows + for a maximum of 22 backend server connections while one with 4 GB would + allow for 97. Note that these are shared across all connection pools in a + cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "user": "str" # Optional. The name of the user for use with the + connection pool. When excluded, all sessions connect to the database as the + inbound user. } } # response body for status code(s): 404 @@ -125463,14 +134315,27 @@ def get_user( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_user_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_add_connection_pool_request( database_cluster_uuid=database_cluster_uuid, - username=username, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -125485,14 +134350,14 @@ def get_user( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [201, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -125530,31 +134395,124 @@ def get_user( return cast(JSON, deserialized) # type: ignore @distributed_trace - def delete_user( - self, database_cluster_uuid: str, username: str, **kwargs: Any - ) -> Optional[JSON]: + def get_connection_pool( + self, database_cluster_uuid: str, pool_name: str, **kwargs: Any + ) -> 
JSON: # pylint: disable=line-too-long - """Remove a Database User. - - To remove a specific database user, send a DELETE request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME``. - - A status of 204 will be given. This indicates that the request was processed - successfully, but that no response body is needed. + """Retrieve Existing Connection Pool (PostgreSQL). - Note: User management is not supported for Caching or Valkey clusters. + To show information about an existing connection pool for a PostgreSQL database cluster, send a + GET request to ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. + The response will be a JSON object with a ``pool`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str - :return: JSON object or None - :rtype: JSON or None + :param pool_name: The name used to identify the connection pool. Required. + :type pool_name: str + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 200 + response == { + "pool": { + "db": "str", # The database for use with the connection pool. + Required. + "mode": "str", # The PGBouncer transaction mode for the connection + pool. The allowed values are session, transaction, and statement. Required. + "name": "str", # A unique name for the connection pool. Must be + between 3 and 60 characters. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The + maximum allowed size is determined by the size of the cluster's primary node. + 25 backend server connections are allowed for every 1GB of RAM. Three are + reserved for maintenance. For example, a primary node with 1 GB of RAM allows + for a maximum of 22 backend server connections while one with 4 GB would + allow for 97. 
Note that these are shared across all connection pools in a + cluster. Required. + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "standby_private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user.:code:`
`:code:`
`Requires + ``database:view_credentials`` scope. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the + database.:code:`
`:code:`
`Requires ``database:view_credentials`` + scope. + }, + "user": "str" # Optional. The name of the user for use with the + connection pool. When excluded, all sessions connect to the database as the + inbound user. + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -125583,11 +134541,11 @@ def delete_user( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_delete_user_request( + _request = build_databases_get_connection_pool_request( database_cluster_uuid=database_cluster_uuid, - username=username, + pool_name=pool_name, headers=_headers, params=_params, ) @@ -125602,15 +134560,14 @@ def delete_user( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -125621,6 +134578,11 @@ def delete_user( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -125638,47 +134600,37 @@ def delete_user( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return 
deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @overload - def update_user( + def update_connection_pool( self, database_cluster_uuid: str, - username: str, + pool_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any, - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Update a Database User. - - To update an existing database user, send a PUT request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME`` - with the desired settings. - - **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change - the name of a user, - you must recreate a new user. + """Update Connection Pools (PostgreSQL). - The response will be a JSON object with a key called ``user``. The value of this will be an - object that contains the name of the update database user, along with the ``settings`` object - that - has been updated. + To update a connection pool for a PostgreSQL database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str + :param pool_name: The name used to identify the connection pool. Required. + :type pool_name: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -125686,136 +134638,19 @@ def update_user( # JSON input template you can fill out and use as your body input. body = { - "settings": { - "acl": [ - { - "permission": "str", # Permission set applied to the - ACL. 'consume' allows for messages to be consumed from the topic. 
- 'produce' allows for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' permission. - 'admin' allows for 'produceconsume' as well as any operations to - administer the topic (delete, update). Required. Known values are: - "admin", "consume", "produce", and "produceconsume". - "topic": "str", # A regex for matching the topic(s) - that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for the ACL. - Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to which the - user should have access. When the database is set to ``admin``"" , - the user will have access to all databases based on the user's role - i.e. a user with the role ``readOnly`` assigned to the ``admin`` - database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the user - with each role mapping to a MongoDB built-in role. ``readOnly`` maps to - a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for matching the - indexes that this ACL should apply to. - "permission": "str" # Optional. Permission set - applied to the ACL. 'read' allows user to read from the index. - 'write' allows for user to write to the index. 'readwrite' allows for - both 'read' and 'write' permission. 'deny'(default) restricts user - from performing any operation over an index. 'admin' allows for - 'readwrite' as well as any operations to administer the index. Known - values are: "deny", "admin", "read", "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres clusters, set - to ``true`` for a user with replication rights. This option is not currently - supported for other database engines. 
- } + "db": "str", # The database for use with the connection pool. Required. + "mode": "str", # The PGBouncer transaction mode for the connection pool. The + allowed values are session, transaction, and statement. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The maximum + allowed size is determined by the size of the cluster's primary node. 25 backend + server connections are allowed for every 1GB of RAM. Three are reserved for + maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of + 22 backend server connections while one with 4 GB would allow for 97. Note that + these are shared across all connection pools in a cluster. Required. + "user": "str" # Optional. The name of the user for use with the connection + pool. When excluded, all sessions connect to the database as the inbound user. } - # response body for status code(s): 201 - response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -125830,124 +134665,37 @@ def update_user( """ @overload - def update_user( + def update_connection_pool( self, database_cluster_uuid: str, - username: str, + pool_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any, - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Update a Database User. - - To update an existing database user, send a PUT request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME`` - with the desired settings. - - **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change - the name of a user, - you must recreate a new user. + """Update Connection Pools (PostgreSQL). - The response will be a JSON object with a key called ``user``. The value of this will be an - object that contains the name of the update database user, along with the ``settings`` object - that - has been updated. + To update a connection pool for a PostgreSQL database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str + :param pool_name: The name used to identify the connection pool. Required. + :type pool_name: str :param body: Required. 
:type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 201 - response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -125962,37 +134710,27 @@ def update_user( """ @distributed_trace - def update_user( + def update_connection_pool( self, database_cluster_uuid: str, - username: str, + pool_name: str, body: Union[JSON, IO[bytes]], **kwargs: Any, - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Update a Database User. - - To update an existing database user, send a PUT request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME`` - with the desired settings. - - **Note**\\ : only ``settings`` can be updated via this type of request. If you wish to change - the name of a user, - you must recreate a new user. + """Update Connection Pools (PostgreSQL). - The response will be a JSON object with a key called ``user``. The value of this will be an - object that contains the name of the update database user, along with the ``settings`` object - that - has been updated. + To update a connection pool for a PostgreSQL database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str + :param pool_name: The name used to identify the connection pool. Required. + :type pool_name: str :param body: Is either a JSON type or a IO[bytes] type. Required. 
:type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -126000,136 +134738,19 @@ def update_user( # JSON input template you can fill out and use as your body input. body = { - "settings": { - "acl": [ - { - "permission": "str", # Permission set applied to the - ACL. 'consume' allows for messages to be consumed from the topic. - 'produce' allows for messages to be published to the topic. - 'produceconsume' allows for both 'consume' and 'produce' permission. - 'admin' allows for 'produceconsume' as well as any operations to - administer the topic (delete, update). Required. Known values are: - "admin", "consume", "produce", and "produceconsume". - "topic": "str", # A regex for matching the topic(s) - that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for the ACL. - Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to which the - user should have access. When the database is set to ``admin``"" , - the user will have access to all databases based on the user's role - i.e. a user with the role ``readOnly`` assigned to the ``admin`` - database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the user - with each role mapping to a MongoDB built-in role. ``readOnly`` maps to - a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for matching the - indexes that this ACL should apply to. - "permission": "str" # Optional. Permission set - applied to the ACL. 'read' allows user to read from the index. - 'write' allows for user to write to the index. 
'readwrite' allows for - both 'read' and 'write' permission. 'deny'(default) restricts user - from performing any operation over an index. 'admin' allows for - 'readwrite' as well as any operations to administer the index. Known - values are: "deny", "admin", "read", "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres clusters, set - to ``true`` for a user with replication rights. This option is not currently - supported for other database engines. - } + "db": "str", # The database for use with the connection pool. Required. + "mode": "str", # The PGBouncer transaction mode for the connection pool. The + allowed values are session, transaction, and statement. Required. + "size": 0, # The desired size of the PGBouncer connection pool. The maximum + allowed size is determined by the size of the cluster's primary node. 25 backend + server connections are allowed for every 1GB of RAM. Three are reserved for + maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of + 22 backend server connections while one with 4 GB would allow for 97. Note that + these are shared across all connection pools in a cluster. Required. + "user": "str" # Optional. The name of the user for use with the connection + pool. When excluded, all sessions connect to the database as the inbound user. } - # response body for status code(s): 201 - response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. 
If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -126161,7 +134782,7 @@ def update_user( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -126171,9 +134792,9 @@ def update_user( else: _json = body - _request = build_databases_update_user_request( + _request = build_databases_update_connection_pool_request( database_cluster_uuid=database_cluster_uuid, - username=username, + pool_name=pool_name, content_type=content_type, json=_json, content=_content, @@ -126191,14 +134812,15 @@ def update_user( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 201: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -126209,11 +134831,6 @@ def update_user( "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if 
response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -126231,403 +134848,34 @@ def update_user( deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @overload - def reset_auth( - self, - database_cluster_uuid: str, - username: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Reset a Database User's Password or Authentication Method. - - To reset the password for a database user, send a POST request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``. - - For ``mysql`` databases, the authentication method can be specifying by - including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin`` - value specified. - - The response will be a JSON object with a ``user`` key. This will be set to an - object containing the standard database user attributes. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "mysql_settings": { - "auth_plugin": "str" # A string specifying the authentication method - to be used for connections to the MySQL user account. The valid values are - ``mysql_native_password`` or ``caching_sha2_password``. 
If excluded when - creating a new user, the default for the version of MySQL in use will be - used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required. - Known values are: "mysql_native_password" and "caching_sha2_password". - } - } - - # response body for status code(s): 200 - response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - def reset_auth( - self, - database_cluster_uuid: str, - username: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Reset a Database User's Password or Authentication Method. - - To reset the password for a database user, send a POST request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``. - - For ``mysql`` databases, the authentication method can be specifying by - including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin`` - value specified. - - The response will be a JSON object with a ``user`` key. This will be set to an - object containing the standard database user attributes. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str - :param body: Required. 
- :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python + return cls(pipeline_response, deserialized, response_headers) # type: ignore - # response body for status code(s): 200 - response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ + return deserialized # type: ignore @distributed_trace - def reset_auth( - self, - database_cluster_uuid: str, - username: str, - body: Union[JSON, IO[bytes]], - **kwargs: Any, - ) -> JSON: + def delete_connection_pool( + self, database_cluster_uuid: str, pool_name: str, **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Reset a Database User's Password or Authentication Method. - - To reset the password for a database user, send a POST request to - ``/v2/databases/$DATABASE_ID/users/$USERNAME/reset_auth``. + """Delete a Connection Pool (PostgreSQL). - For ``mysql`` databases, the authentication method can be specifying by - including a key in the JSON body called ``mysql_settings`` with the ``auth_plugin`` - value specified. + To delete a specific connection pool for a PostgreSQL database cluster, send + a DELETE request to ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. - The response will be a JSON object with a ``user`` key. 
This will be set to an - object containing the standard database user attributes. + A status of 204 will be given. This indicates that the request was processed + successfully, but that no response body is needed. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param username: The name of the database user. Required. - :type username: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON + :param pool_name: The name used to identify the connection pool. Required. + :type pool_name: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "mysql_settings": { - "auth_plugin": "str" # A string specifying the authentication method - to be used for connections to the MySQL user account. The valid values are - ``mysql_native_password`` or ``caching_sha2_password``. If excluded when - creating a new user, the default for the version of MySQL in use will be - used. As of MySQL 8.0, the default is ``caching_sha2_password``. Required. - Known values are: "mysql_native_password" and "caching_sha2_password". - } - } - - # response body for status code(s): 200 - response == { - "user": { - "name": "str", # The name of a database user. Required. - "access_cert": "str", # Optional. Access certificate for TLS client - authentication. (Kafka only). - "access_key": "str", # Optional. Access key for TLS client - authentication. (Kafka only). - "mysql_settings": { - "auth_plugin": "str" # A string specifying the - authentication method to be used for connections to the MySQL user - account. The valid values are ``mysql_native_password`` or - ``caching_sha2_password``. If excluded when creating a new user, the - default for the version of MySQL in use will be used. 
As of MySQL 8.0, - the default is ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "password": "str", # Optional. A randomly generated password for the - database user.:code:`
`Requires ``database:view_credentials`` scope. - "role": "str", # Optional. A string representing the database user's - role. The value will be either "primary" or "normal". Known values are: - "primary" and "normal". - "settings": { - "acl": [ - { - "permission": "str", # Permission set - applied to the ACL. 'consume' allows for messages to be consumed - from the topic. 'produce' allows for messages to be published to - the topic. 'produceconsume' allows for both 'consume' and - 'produce' permission. 'admin' allows for 'produceconsume' as well - as any operations to administer the topic (delete, update). - Required. Known values are: "admin", "consume", "produce", and - "produceconsume". - "topic": "str", # A regex for matching the - topic(s) that this ACL should apply to. Required. - "id": "str" # Optional. An identifier for - the ACL. Will be computed after the ACL is created/updated. - } - ], - "mongo_user_settings": { - "databases": [ - "str" # Optional. A list of databases to - which the user should have access. When the database is set to - ``admin``"" , the user will have access to all databases based on - the user's role i.e. a user with the role ``readOnly`` assigned - to the ``admin`` database will have read access to all databases. - ], - "role": "str" # Optional. The role to assign to the - user with each role mapping to a MongoDB built-in role. ``readOnly`` - maps to a `read - `_ - role. ``readWrite`` maps to a `readWrite - `_ - role. ``dbAdmin`` maps to a `dbAdmin - `_ - role. Known values are: "readOnly", "readWrite", and "dbAdmin". - }, - "opensearch_acl": [ - { - "index": "str", # Optional. A regex for - matching the indexes that this ACL should apply to. - "permission": "str" # Optional. Permission - set applied to the ACL. 'read' allows user to read from the - index. 'write' allows for user to write to the index. 'readwrite' - allows for both 'read' and 'write' permission. 
'deny'(default) - restricts user from performing any operation over an index. - 'admin' allows for 'readwrite' as well as any operations to - administer the index. Known values are: "deny", "admin", "read", - "readwrite", and "write". - } - ], - "pg_allow_replication": bool # Optional. For Postgres - clusters, set to ``true`` for a user with replication rights. This option - is not currently supported for other database engines. - } - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -126653,28 +134901,14 @@ def reset_auth( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[JSON] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_reset_auth_request( + _request = build_databases_delete_connection_pool_request( database_cluster_uuid=database_cluster_uuid, - username=username, - content_type=content_type, - json=_json, - content=_content, + pool_name=pool_name, headers=_headers, params=_params, ) @@ -126689,14 +134923,15 @@ def reset_auth( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: 
response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -126707,11 +134942,6 @@ def reset_auth( "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -126729,22 +134959,19 @@ def reset_auth( deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @distributed_trace - def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + def get_eviction_policy(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List All Databases. - - To list all of the databases in a clusters, send a GET request to - ``/v2/databases/$DATABASE_ID/dbs``. - - The result will be a JSON object with a ``dbs`` key. This will be set to an array - of database objects, each of which will contain the standard database attributes. + """Retrieve the Eviction Policy for a Caching or Valkey Cluster. - Note: Database management is not supported for Caching or Valkey clusters. + To retrieve the configured eviction policy for an existing Caching or Valkey cluster, send a + GET request to ``/v2/databases/$DATABASE_ID/eviction_policy``. + The response will be a JSON object with an ``eviction_policy`` key. This will be set to a + string representing the eviction policy. :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
:type database_cluster_uuid: str @@ -126757,11 +134984,16 @@ def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "dbs": [ - { - "name": "str" # The name of the database. Required. - } - ] + "eviction_policy": "str" # A string specifying the desired eviction policy + for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data, + returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key, + least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random + order. * ``volatile_lru``"" : Evict keys with expiration only, least recently + used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a + random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest + time-to-live (TTL) first. Required. Known values are: "noeviction", + "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and + "volatile_ttl". } # response body for status code(s): 404 response == { @@ -126793,7 +135025,7 @@ def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_request( + _request = build_databases_get_eviction_policy_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -126854,24 +135086,19 @@ def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @overload - def add( + def update_eviction_policy( self, database_cluster_uuid: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any, - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Add a New Database. - - To add a new database to an existing cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/dbs``. - - Note: Database management is not supported for Caching or Valkey clusters. 
+ """Configure the Eviction Policy for a Caching or Valkey Cluster. - The response will be a JSON object with a key called ``db``. The value of this will be - an object that contains the standard attributes associated with a database. + To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request + to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -126880,8 +135107,8 @@ def add( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -126889,15 +135116,18 @@ def add( # JSON input template you can fill out and use as your body input. body = { - "name": "str" # The name of the database. Required. + "eviction_policy": "str" # A string specifying the desired eviction policy + for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data, + returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key, + least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random + order. * ``volatile_lru``"" : Evict keys with expiration only, least recently + used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a + random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest + time-to-live (TTL) first. Required. Known values are: "noeviction", + "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and + "volatile_ttl". } - # response body for status code(s): 201 - response == { - "db": { - "name": "str" # The name of the database. Required. 
- } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -126912,24 +135142,19 @@ def add( """ @overload - def add( + def update_eviction_policy( self, database_cluster_uuid: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any, - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Add a New Database. - - To add a new database to an existing cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/dbs``. - - Note: Database management is not supported for Caching or Valkey clusters. + """Configure the Eviction Policy for a Caching or Valkey Cluster. - The response will be a JSON object with a key called ``db``. The value of this will be - an object that contains the standard attributes associated with a database. + To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request + to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -126938,19 +135163,13 @@ def add( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 201 - response == { - "db": { - "name": "str" # The name of the database. Required. 
- } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -126965,26 +135184,21 @@ def add( """ @distributed_trace - def add( + def update_eviction_policy( self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Add a New Database. - - To add a new database to an existing cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/dbs``. - - Note: Database management is not supported for Caching or Valkey clusters. + """Configure the Eviction Policy for a Caching or Valkey Cluster. - The response will be a JSON object with a key called ``db``. The value of this will be - an object that contains the standard attributes associated with a database. + To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request + to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -126992,15 +135206,18 @@ def add( # JSON input template you can fill out and use as your body input. body = { - "name": "str" # The name of the database. Required. + "eviction_policy": "str" # A string specifying the desired eviction policy + for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data, + returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key, + least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random + order. * ``volatile_lru``"" : Evict keys with expiration only, least recently + used (LRU) first. 
* ``volatile_random``"" : Evict keys with expiration only in a + random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest + time-to-live (TTL) first. Required. Known values are: "noeviction", + "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and + "volatile_ttl". } - # response body for status code(s): 201 - response == { - "db": { - "name": "str" # The name of the database. Required. - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -127032,7 +135249,7 @@ def add( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -127042,7 +135259,7 @@ def add( else: _json = body - _request = build_databases_add_request( + _request = build_databases_update_eviction_policy_request( database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, @@ -127061,14 +135278,15 @@ def add( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 201: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -127079,11 +135297,6 @@ def add( "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", 
response.headers.get("ratelimit-limit") @@ -127101,29 +135314,22 @@ def add( deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @distributed_trace - def get( - self, database_cluster_uuid: str, database_name: str, **kwargs: Any - ) -> JSON: + def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Retrieve an Existing Database. - - To show information about an existing database cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/dbs/$DB_NAME``. - - Note: Database management is not supported for Caching or Valkey clusters. + """Retrieve the SQL Modes for a MySQL Cluster. - The response will be a JSON object with a ``db`` key. This will be set to an object - containing the standard database attributes. + To retrieve the configured SQL modes for an existing MySQL cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/sql_mode``. + The response will be a JSON object with a ``sql_mode`` key. This will be set to a string + representing the configured SQL modes. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param database_name: The name of the database. Required. - :type database_name: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -127133,9 +135339,8 @@ def get( # response body for status code(s): 200 response == { - "db": { - "name": "str" # The name of the database. Required. - } + "sql_mode": "str" # A string specifying the configured SQL modes for the + MySQL cluster. Required. 
} # response body for status code(s): 404 response == { @@ -127167,9 +135372,8 @@ def get( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_request( + _request = build_databases_get_sql_mode_request( database_cluster_uuid=database_cluster_uuid, - database_name=database_name, headers=_headers, params=_params, ) @@ -127228,25 +135432,31 @@ def get( return cast(JSON, deserialized) # type: ignore - @distributed_trace - def delete( - self, database_cluster_uuid: str, database_name: str, **kwargs: Any + @overload + def update_sql_mode( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, ) -> Optional[JSON]: # pylint: disable=line-too-long - """Delete a Database. - - To delete a specific database, send a DELETE request to - ``/v2/databases/$DATABASE_ID/dbs/$DB_NAME``. - - A status of 204 will be given. This indicates that the request was processed - successfully, but that no response body is needed. + """Update SQL Mode for a Cluster. - Note: Database management is not supported for Caching or Valkey clusters. + To configure the SQL modes for an existing MySQL cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 + documentation for a `full list of supported SQL modes + `_. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param database_name: The name of the database. Required. - :type database_name: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -127254,6 +135464,12 @@ def delete( Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "sql_mode": "str" # A string specifying the configured SQL modes for the + MySQL cluster. Required. + } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -127266,208 +135482,82 @@ def delete( tickets to help identify the issue. } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - - _request = build_databases_delete_request( - database_cluster_uuid=database_cluster_uuid, - database_name=database_name, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - deserialized = None - response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", 
response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + @overload + def update_sql_mode( + self, + database_cluster_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Update SQL Mode for a Cluster. - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + To configure the SQL modes for an existing MySQL cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 + documentation for a `full list of supported SQL modes + `_. + A successful request will receive a 204 No Content status code with no body in response. - if response.content: - deserialized = response.json() - else: - deserialized = None + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + Example: + .. 
code-block:: python - return deserialized # type: ignore + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ @distributed_trace - def list_connection_pools(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + def update_sql_mode( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """List Connection Pools (PostgreSQL). + """Update SQL Mode for a Cluster. - To list all of the connection pools available to a PostgreSQL database cluster, send a GET - request to ``/v2/databases/$DATABASE_ID/pools``. - The result will be a JSON object with a ``pools`` key. This will be set to an array of - connection pool objects. + To configure the SQL modes for an existing MySQL cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 + documentation for a `full list of supported SQL modes + `_. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :return: JSON object - :rtype: JSON + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. 
code-block:: python - # response body for status code(s): 200 - response == { - "pools": [ - { - "db": "str", # The database for use with the connection - pool. Required. - "mode": "str", # The PGBouncer transaction mode for the - connection pool. The allowed values are session, transaction, and - statement. Required. - "name": "str", # A unique name for the connection pool. Must - be between 3 and 60 characters. Required. - "size": 0, # The desired size of the PGBouncer connection - pool. The maximum allowed size is determined by the size of the cluster's - primary node. 25 backend server connections are allowed for every 1GB of - RAM. Three are reserved for maintenance. For example, a primary node with - 1 GB of RAM allows for a maximum of 22 backend server connections while - one with 4 GB would allow for 97. Note that these are shared across all - connection pools in a cluster. Required. - "connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "private_connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "standby_connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the - default database. - "host": "str", # Optional. The FQDN pointing to the - database cluster's current primary node. - "password": "str", # Optional. The randomly - generated password for the default - user.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - "port": 0, # Optional. The port on which the - database cluster is listening. - "ssl": bool, # Optional. A boolean value indicating - if the connection should be made over SSL. - "uri": "str", # Optional. A connection string in the - format accepted by the ``psql`` command. This is provided as a - convenience and should be able to be constructed by the other - attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - }, - "user": "str" # Optional. The name of the user for use with - the connection pool. When excluded, all sessions connect to the database - as the inbound user. - } - ] + # JSON input template you can fill out and use as your body input. + body = { + "sql_mode": "str" # A string specifying the configured SQL modes for the + MySQL cluster. Required. } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -127493,13 +135583,27 @@ def list_connection_pools(self, database_cluster_uuid: str, **kwargs: Any) -> JS } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_list_connection_pools_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_databases_update_sql_mode_request( database_cluster_uuid=database_cluster_uuid, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -127514,14 +135618,15 @@ def list_connection_pools(self, database_cluster_uuid: str, **kwargs: Any) -> JS response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if 
response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -127532,11 +135637,6 @@ def list_connection_pools(self, database_cluster_uuid: str, **kwargs: Any) -> JS "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -127554,33 +135654,25 @@ def list_connection_pools(self, database_cluster_uuid: str, **kwargs: Any) -> JS deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @overload - def add_connection_pool( + def update_major_version( self, database_cluster_uuid: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any, - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Add a New Connection Pool (PostgreSQL). - - For PostgreSQL database clusters, connection pools can be used to allow a - database to share its idle connections. The popular PostgreSQL connection - pooling utility PgBouncer is used to provide this service. `See here for more information - `_ - about how and why to use PgBouncer connection pooling including - details about the available transaction modes. + """Upgrade Major Version for a Database. - To add a new connection pool to a PostgreSQL database cluster, send a POST - request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool, - the user to connect with, the database to connect to, as well as its desired - size and transaction mode. 
+ To upgrade the major version of a database, send a PUT request to + ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -127589,8 +135681,8 @@ def add_connection_pool( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -127598,190 +135690,10 @@ def add_connection_pool( # JSON input template you can fill out and use as your body input. body = { - "db": "str", # The database for use with the connection pool. Required. - "mode": "str", # The PGBouncer transaction mode for the connection pool. The - allowed values are session, transaction, and statement. Required. - "name": "str", # A unique name for the connection pool. Must be between 3 - and 60 characters. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The maximum - allowed size is determined by the size of the cluster's primary node. 25 backend - server connections are allowed for every 1GB of RAM. Three are reserved for - maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of - 22 backend server connections while one with 4 GB would allow for 97. Note that - these are shared across all connection pools in a cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "user": "str" # Optional. The name of the user for use with the connection - pool. When excluded, all sessions connect to the database as the inbound user. + "version": "str" # Optional. A string representing the version of the + database engine in use for the cluster. } - # response body for status code(s): 201 - response == { - "pool": { - "db": "str", # The database for use with the connection pool. - Required. - "mode": "str", # The PGBouncer transaction mode for the connection - pool. The allowed values are session, transaction, and statement. Required. - "name": "str", # A unique name for the connection pool. Must be - between 3 and 60 characters. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The - maximum allowed size is determined by the size of the cluster's primary node. - 25 backend server connections are allowed for every 1GB of RAM. Three are - reserved for maintenance. For example, a primary node with 1 GB of RAM allows - for a maximum of 22 backend server connections while one with 4 GB would - allow for 97. Note that these are shared across all connection pools in a - cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "user": "str" # Optional. The name of the user for use with the - connection pool. When excluded, all sessions connect to the database as the - inbound user. - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -127796,28 +135708,20 @@ def add_connection_pool( """ @overload - def add_connection_pool( + def update_major_version( self, database_cluster_uuid: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any, - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Add a New Connection Pool (PostgreSQL). - - For PostgreSQL database clusters, connection pools can be used to allow a - database to share its idle connections. The popular PostgreSQL connection - pooling utility PgBouncer is used to provide this service. `See here for more information - `_ - about how and why to use PgBouncer connection pooling including - details about the available transaction modes. + """Upgrade Major Version for a Database. - To add a new connection pool to a PostgreSQL database cluster, send a POST - request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool, - the user to connect with, the database to connect to, as well as its desired - size and transaction mode. + To upgrade the major version of a database, send a PUT request to + ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -127826,110 +135730,13 @@ def add_connection_pool( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". 
:paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 201 - response == { - "pool": { - "db": "str", # The database for use with the connection pool. - Required. - "mode": "str", # The PGBouncer transaction mode for the connection - pool. The allowed values are session, transaction, and statement. Required. - "name": "str", # A unique name for the connection pool. Must be - between 3 and 60 characters. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The - maximum allowed size is determined by the size of the cluster's primary node. - 25 backend server connections are allowed for every 1GB of RAM. Three are - reserved for maintenance. For example, a primary node with 1 GB of RAM allows - for a maximum of 22 backend server connections while one with 4 GB would - allow for 97. Note that these are shared across all connection pools in a - cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "user": "str" # Optional. The name of the user for use with the - connection pool. When excluded, all sessions connect to the database as the - inbound user. - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -127944,30 +135751,22 @@ def add_connection_pool( """ @distributed_trace - def add_connection_pool( + def update_major_version( self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Add a New Connection Pool (PostgreSQL). - - For PostgreSQL database clusters, connection pools can be used to allow a - database to share its idle connections. The popular PostgreSQL connection - pooling utility PgBouncer is used to provide this service. `See here for more information - `_ - about how and why to use PgBouncer connection pooling including - details about the available transaction modes. + """Upgrade Major Version for a Database. - To add a new connection pool to a PostgreSQL database cluster, send a POST - request to ``/v2/databases/$DATABASE_ID/pools`` specifying a name for the pool, - the user to connect with, the database to connect to, as well as its desired - size and transaction mode. + To upgrade the major version of a database, send a PUT request to + ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str :param body: Is either a JSON type or a IO[bytes] type. Required. 
:type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -127975,190 +135774,10 @@ def add_connection_pool( # JSON input template you can fill out and use as your body input. body = { - "db": "str", # The database for use with the connection pool. Required. - "mode": "str", # The PGBouncer transaction mode for the connection pool. The - allowed values are session, transaction, and statement. Required. - "name": "str", # A unique name for the connection pool. Must be between 3 - and 60 characters. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The maximum - allowed size is determined by the size of the cluster's primary node. 25 backend - server connections are allowed for every 1GB of RAM. Three are reserved for - maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of - 22 backend server connections while one with 4 GB would allow for 97. Note that - these are shared across all connection pools in a cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated password for - the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database cluster is - listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format accepted - by the ``psql`` command. This is provided as a convenience and should be able - to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "user": "str" # Optional. The name of the user for use with the connection - pool. When excluded, all sessions connect to the database as the inbound user. + "version": "str" # Optional. A string representing the version of the + database engine in use for the cluster. } - # response body for status code(s): 201 - response == { - "pool": { - "db": "str", # The database for use with the connection pool. - Required. - "mode": "str", # The PGBouncer transaction mode for the connection - pool. The allowed values are session, transaction, and statement. Required. - "name": "str", # A unique name for the connection pool. Must be - between 3 and 60 characters. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The - maximum allowed size is determined by the size of the cluster's primary node. - 25 backend server connections are allowed for every 1GB of RAM. Three are - reserved for maintenance. For example, a primary node with 1 GB of RAM allows - for a maximum of 22 backend server connections while one with 4 GB would - allow for 97. Note that these are shared across all connection pools in a - cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "user": "str" # Optional. The name of the user for use with the - connection pool. When excluded, all sessions connect to the database as the - inbound user. - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -128190,7 +135809,7 @@ def add_connection_pool( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -128200,7 +135819,7 @@ def add_connection_pool( else: _json = body - _request = build_databases_add_connection_pool_request( + _request = build_databases_update_major_version_request( database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, @@ -128219,14 +135838,15 @@ def add_connection_pool( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 201: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -128237,11 +135857,6 @@ def add_connection_pool( "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -128259,25 +135874,21 @@ def add_connection_pool( deserialized = None if cls: - return cls(pipeline_response, 
cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore - @distributed_trace - def get_connection_pool( - self, database_cluster_uuid: str, pool_name: str, **kwargs: Any - ) -> JSON: + @distributed_trace + def get_autoscale(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Retrieve Existing Connection Pool (PostgreSQL). + """Retrieve Autoscale Configuration for a Database Cluster. - To show information about an existing connection pool for a PostgreSQL database cluster, send a - GET request to ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. - The response will be a JSON object with a ``pool`` key. + To retrieve the autoscale configuration for an existing database cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/autoscale``. + The response will be a JSON object with autoscaling configuration details. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param pool_name: The name used to identify the connection pool. Required. - :type pool_name: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -128287,99 +135898,16 @@ def get_connection_pool( # response body for status code(s): 200 response == { - "pool": { - "db": "str", # The database for use with the connection pool. - Required. - "mode": "str", # The PGBouncer transaction mode for the connection - pool. The allowed values are session, transaction, and statement. Required. - "name": "str", # A unique name for the connection pool. Must be - between 3 and 60 characters. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The - maximum allowed size is determined by the size of the cluster's primary node. 
- 25 backend server connections are allowed for every 1GB of RAM. Three are - reserved for maintenance. For example, a primary node with 1 GB of RAM allows - for a maximum of 22 backend server connections while one with 4 GB would - allow for 97. Note that these are shared across all connection pools in a - cluster. Required. - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "standby_private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user.:code:`
`:code:`
`Requires - ``database:view_credentials`` scope. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the - database.:code:`
`:code:`
`Requires ``database:view_credentials`` - scope. - }, - "user": "str" # Optional. The name of the user for use with the - connection pool. When excluded, all sessions connect to the database as the - inbound user. + "autoscale": { + "storage": { + "enabled": bool, # Whether storage autoscaling is enabled + for the cluster. Required. + "increment_gib": 0, # Optional. The amount of additional + storage to add (in GiB) when autoscaling is triggered. + "threshold_percent": 0 # Optional. The storage usage + threshold percentage that triggers autoscaling. When storage usage + exceeds this percentage, additional storage will be added automatically. + } } } # response body for status code(s): 404 @@ -128412,9 +135940,8 @@ def get_connection_pool( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_connection_pool_request( + _request = build_databases_get_autoscale_request( database_cluster_uuid=database_cluster_uuid, - pool_name=pool_name, headers=_headers, params=_params, ) @@ -128474,25 +136001,23 @@ def get_connection_pool( return cast(JSON, deserialized) # type: ignore @overload - def update_connection_pool( + def update_autoscale( self, database_cluster_uuid: str, - pool_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any, ) -> Optional[JSON]: # pylint: disable=line-too-long - """Update Connection Pools (PostgreSQL). + """Configure Autoscale Settings for a Database Cluster. - To update a connection pool for a PostgreSQL database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. + To configure autoscale settings for an existing database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
:type database_cluster_uuid: str - :param pool_name: The name used to identify the connection pool. Required. - :type pool_name: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -128507,20 +136032,18 @@ def update_connection_pool( # JSON input template you can fill out and use as your body input. body = { - "db": "str", # The database for use with the connection pool. Required. - "mode": "str", # The PGBouncer transaction mode for the connection pool. The - allowed values are session, transaction, and statement. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The maximum - allowed size is determined by the size of the cluster's primary node. 25 backend - server connections are allowed for every 1GB of RAM. Three are reserved for - maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of - 22 backend server connections while one with 4 GB would allow for 97. Note that - these are shared across all connection pools in a cluster. Required. - "user": "str" # Optional. The name of the user for use with the connection - pool. When excluded, all sessions connect to the database as the inbound user. + "storage": { + "enabled": bool, # Whether storage autoscaling is enabled for the + cluster. Required. + "increment_gib": 0, # Optional. The amount of additional storage to + add (in GiB) when autoscaling is triggered. + "threshold_percent": 0 # Optional. The storage usage threshold + percentage that triggers autoscaling. When storage usage exceeds this + percentage, additional storage will be added automatically. + } } - # response body for status code(s): 404 + # response body for status code(s): 404, 422 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -128534,25 +136057,23 @@ def update_connection_pool( """ @overload - def update_connection_pool( + def update_autoscale( self, database_cluster_uuid: str, - pool_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any, ) -> Optional[JSON]: # pylint: disable=line-too-long - """Update Connection Pools (PostgreSQL). + """Configure Autoscale Settings for a Database Cluster. - To update a connection pool for a PostgreSQL database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. + To configure autoscale settings for an existing database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param pool_name: The name used to identify the connection pool. Required. - :type pool_name: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -128565,7 +136086,7 @@ def update_connection_pool( Example: .. code-block:: python - # response body for status code(s): 404 + # response body for status code(s): 404, 422 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -128579,23 +136100,18 @@ def update_connection_pool( """ @distributed_trace - def update_connection_pool( - self, - database_cluster_uuid: str, - pool_name: str, - body: Union[JSON, IO[bytes]], - **kwargs: Any, + def update_autoscale( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Update Connection Pools (PostgreSQL). 
+ """Configure Autoscale Settings for a Database Cluster. - To update a connection pool for a PostgreSQL database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. + To configure autoscale settings for an existing database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration. + A successful request will receive a 204 No Content status code with no body in response. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param pool_name: The name used to identify the connection pool. Required. - :type pool_name: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object or None @@ -128607,20 +136123,18 @@ def update_connection_pool( # JSON input template you can fill out and use as your body input. body = { - "db": "str", # The database for use with the connection pool. Required. - "mode": "str", # The PGBouncer transaction mode for the connection pool. The - allowed values are session, transaction, and statement. Required. - "size": 0, # The desired size of the PGBouncer connection pool. The maximum - allowed size is determined by the size of the cluster's primary node. 25 backend - server connections are allowed for every 1GB of RAM. Three are reserved for - maintenance. For example, a primary node with 1 GB of RAM allows for a maximum of - 22 backend server connections while one with 4 GB would allow for 97. Note that - these are shared across all connection pools in a cluster. Required. - "user": "str" # Optional. The name of the user for use with the connection - pool. When excluded, all sessions connect to the database as the inbound user. + "storage": { + "enabled": bool, # Whether storage autoscaling is enabled for the + cluster. Required. + "increment_gib": 0, # Optional. 
The amount of additional storage to + add (in GiB) when autoscaling is triggered. + "threshold_percent": 0 # Optional. The storage usage threshold + percentage that triggers autoscaling. When storage usage exceeds this + percentage, additional storage will be added automatically. + } } - # response body for status code(s): 404 + # response body for status code(s): 404, 422 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -128661,9 +136175,8 @@ def update_connection_pool( else: _json = body - _request = build_databases_update_connection_pool_request( + _request = build_databases_update_autoscale_request( database_cluster_uuid=database_cluster_uuid, - pool_name=pool_name, content_type=content_type, json=_json, content=_content, @@ -128681,7 +136194,7 @@ def update_connection_pool( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [204, 404, 422]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -128716,35 +136229,826 @@ def update_connection_pool( else: deserialized = None + if response.status_code == 422: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized # type: ignore @distributed_trace - def delete_connection_pool( - self, database_cluster_uuid: str, pool_name: str, 
**kwargs: Any - ) -> Optional[JSON]: + def list_kafka_topics(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Delete a Connection Pool (PostgreSQL). + """List Topics for a Kafka Cluster. - To delete a specific connection pool for a PostgreSQL database cluster, send - a DELETE request to ``/v2/databases/$DATABASE_ID/pools/$POOL_NAME``. + To list all of a Kafka cluster's topics, send a GET request to + ``/v2/databases/$DATABASE_ID/topics``. - A status of 204 will be given. This indicates that the request was processed - successfully, but that no response body is needed. + The result will be a JSON object with a ``topics`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param pool_name: The name used to identify the connection pool. Required. - :type pool_name: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "topics": [ + { + "name": "str", # Optional. The name of the Kafka topic. + "partition_count": 0, # Optional. The number of partitions + available for the topic. On update, this value can only be increased. + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. + Known values are: "active", "configuring", "deleting", and "unknown". + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. 
+ "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_databases_list_kafka_topics_request( + database_cluster_uuid=database_cluster_uuid, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", 
response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def create_kafka_topic( + self, + database_cluster_uuid: str, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Create Topic for a Kafka Cluster. + + To create a topic attached to a Kafka cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/topics``. + + The result will be a JSON object with a ``topic`` key. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "config": { + "cleanup_policy": "delete", # Optional. Default value is "delete". + The cleanup_policy sets the retention policy to use on log segments. 'delete' + will discard old segments when retention time/size limits are reached. + 'compact' will enable log compaction, resulting in retention of the latest + value for each key. Known values are: "delete", "compact", and + "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". 
The compression_type specifies the compression type of the topic. + Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value is + 86400000. The delete_retention_ms specifies how long (in ms) to retain delete + tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is 60000. + The file_delete_delay_ms specifies the time (in ms) to wait before deleting a + file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_messages specifies the number of messages to + accumulate on a log partition before messages are flushed to disk. + "flush_ms": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a + message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is 4096. The + index_interval_bytes specifies the number of bytes between entries being + added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum + amount of time (in ms) that a message will remain uncompacted. This is only + applicable if the logs are have compaction enabled. + "max_message_bytes": 1048588, # Optional. Default value is 1048588. + The max_messages_bytes specifies the largest record batch size (in bytes) + that can be sent to the server. This is calculated after compression if + compression is enabled. + "message_down_conversion_enable": True, # Optional. Default value is + True. The message_down_conversion_enable specifies whether down-conversion of + message formats is enabled to satisfy consumer requests. When 'false', the + broker will not perform conversion for consumers expecting older message + formats. 
The broker will respond with an ``UNSUPPORTED_VERSION`` error for + consume requests from these older clients. + "message_format_version": "3.0-IV1", # Optional. Default value is + "3.0-IV1". The message_format_version specifies the message format version + used by the broker to append messages to the logs. The value of this setting + is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By + setting a particular message format version, all existing messages on disk + must be smaller or equal to the specified version. Known values are: "0.8.0", + "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", + "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", + "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", + "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", + "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", + "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default value + is "create_time". The message_timestamp_type specifies whether to use the + message create time or log append time as the timestamp on a message. Known + values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. + The min_cleanable_dirty_ratio specifies the frequency of log compaction (if + enabled) in relation to duplicates present in the logs. For example, at 0.5, + at most 50% of the log could be duplicates before compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. The + min_compaction_lag_ms specifies the minimum time (in ms) that a message will + remain uncompacted in the log. Only relevant if log compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. 
The + min_insync_replicas specifies the number of replicas that must ACK a write + for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. The + preallocate specifies whether a file should be preallocated on disk when + creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is 604800000. + The retention_ms specifies the maximum amount of time (in ms) to keep a + message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is 209715200. + The segment_bytes specifies the maximum size of a single log file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is 604800000. The + segment_ms specifies the period of time after which the log will be forced to + roll if the segment file isn't full. This ensures that retention can delete + or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partition_count": 0, # Optional. The number of partitions available for the + topic. On update, this value can only be increased. + "replication_factor": 0 # Optional. The number of nodes to replicate data + across the cluster. + } + + # response body for status code(s): 201 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. 
Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. 
When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. 
Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. 
The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def create_kafka_topic( + self, + database_cluster_uuid: str, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Create Topic for a Kafka Cluster. + + To create a topic attached to a Kafka cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/topics``. + + The result will be a JSON object with a ``topic`` key. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 
'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. 
+ "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. 
Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. 
The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def create_kafka_topic( + self, + database_cluster_uuid: str, + body: Optional[Union[JSON, IO[bytes]]] = None, + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Create Topic for a Kafka Cluster. + + To create a topic attached to a Kafka cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/topics``. + + The result will be a JSON object with a ``topic`` key. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "config": { + "cleanup_policy": "delete", # Optional. Default value is "delete". + The cleanup_policy sets the retention policy to use on log segments. 
'delete' + will discard old segments when retention time/size limits are reached. + 'compact' will enable log compaction, resulting in retention of the latest + value for each key. Known values are: "delete", "compact", and + "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the topic. + Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value is + 86400000. The delete_retention_ms specifies how long (in ms) to retain delete + tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is 60000. + The file_delete_delay_ms specifies the time (in ms) to wait before deleting a + file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_messages specifies the number of messages to + accumulate on a log partition before messages are flushed to disk. + "flush_ms": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a + message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is 4096. The + index_interval_bytes specifies the number of bytes between entries being + added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum + amount of time (in ms) that a message will remain uncompacted. This is only + applicable if the logs are have compaction enabled. + "max_message_bytes": 1048588, # Optional. Default value is 1048588. + The max_messages_bytes specifies the largest record batch size (in bytes) + that can be sent to the server. This is calculated after compression if + compression is enabled. + "message_down_conversion_enable": True, # Optional. 
Default value is + True. The message_down_conversion_enable specifies whether down-conversion of + message formats is enabled to satisfy consumer requests. When 'false', the + broker will not perform conversion for consumers expecting older message + formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for + consume requests from these older clients. + "message_format_version": "3.0-IV1", # Optional. Default value is + "3.0-IV1". The message_format_version specifies the message format version + used by the broker to append messages to the logs. The value of this setting + is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By + setting a particular message format version, all existing messages on disk + must be smaller or equal to the specified version. Known values are: "0.8.0", + "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", + "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", + "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", + "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", + "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", + "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default value + is "create_time". The message_timestamp_type specifies whether to use the + message create time or log append time as the timestamp on a message. Known + values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. + The min_cleanable_dirty_ratio specifies the frequency of log compaction (if + enabled) in relation to duplicates present in the logs. For example, at 0.5, + at most 50% of the log could be duplicates before compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. 
The + min_compaction_lag_ms specifies the minimum time (in ms) that a message will + remain uncompacted in the log. Only relevant if log compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. The + min_insync_replicas specifies the number of replicas that must ACK a write + for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. The + preallocate specifies whether a file should be preallocated on disk when + creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is 604800000. + The retention_ms specifies the maximum amount of time (in ms) to keep a + message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is 209715200. + The segment_bytes specifies the maximum size of a single log file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is 604800000. The + segment_ms specifies the period of time after which the log will be forced to + roll if the segment file isn't full. This ensures that retention can delete + or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partition_count": 0, # Optional. The number of partitions available for the + topic. On update, this value can only be increased. + "replication_factor": 0 # Optional. The number of nodes to replicate data + across the cluster. + } + + # response body for status code(s): 201 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". 
The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. 
This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. 
+ For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. 
The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -128770,14 +137074,30 @@ def delete_connection_pool( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_delete_connection_pool_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_databases_create_kafka_topic_request( database_cluster_uuid=database_cluster_uuid, - pool_name=pool_name, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -128792,15 +137112,14 @@ def delete_connection_pool( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [201, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # 
type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -128811,6 +137130,11 @@ def delete_connection_pool( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -128828,22 +137152,26 @@ def delete_connection_pool( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_eviction_policy(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + def get_kafka_topic( + self, database_cluster_uuid: str, topic_name: str, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Retrieve the Eviction Policy for a Caching or Valkey Cluster. + """Get Topic for a Kafka Cluster. - To retrieve the configured eviction policy for an existing Caching or Valkey cluster, send a - GET request to ``/v2/databases/$DATABASE_ID/eviction_policy``. - The response will be a JSON object with an ``eviction_policy`` key. This will be set to a - string representing the eviction policy. + To retrieve a given topic by name from the set of a Kafka cluster's topics, + send a GET request to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param topic_name: The name used to identify the Kafka topic. Required. 
+ :type topic_name: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -128853,16 +137181,127 @@ def get_eviction_policy(self, database_cluster_uuid: str, **kwargs: Any) -> JSON # response body for status code(s): 200 response == { - "eviction_policy": "str" # A string specifying the desired eviction policy - for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data, - returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key, - least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random - order. * ``volatile_lru``"" : Evict keys with expiration only, least recently - used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a - random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest - time-to-live (TTL) first. Required. Known values are: "noeviction", - "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and - "volatile_ttl". + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. 
+ "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. 
Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. 
The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". 
+ } } # response body for status code(s): 404 response == { @@ -128894,8 +137333,9 @@ def get_eviction_policy(self, database_cluster_uuid: str, **kwargs: Any) -> JSON cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_eviction_policy_request( + _request = build_databases_get_kafka_topic_request( database_cluster_uuid=database_cluster_uuid, + topic_name=topic_name, headers=_headers, params=_params, ) @@ -128955,29 +137395,34 @@ def get_eviction_policy(self, database_cluster_uuid: str, **kwargs: Any) -> JSON return cast(JSON, deserialized) # type: ignore @overload - def update_eviction_policy( + def update_kafka_topic( self, database_cluster_uuid: str, - body: JSON, + topic_name: str, + body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any, - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Configure the Eviction Policy for a Caching or Valkey Cluster. + """Update Topic for a Kafka Cluster. - To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request - to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. + To update a topic attached to a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str + :param body: Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -128985,18 +137430,222 @@ def update_eviction_policy( # JSON input template you can fill out and use as your body input. body = { - "eviction_policy": "str" # A string specifying the desired eviction policy - for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data, - returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key, - least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random - order. * ``volatile_lru``"" : Evict keys with expiration only, least recently - used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a - random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest - time-to-live (TTL) first. Required. Known values are: "noeviction", - "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and - "volatile_ttl". + "config": { + "cleanup_policy": "delete", # Optional. Default value is "delete". + The cleanup_policy sets the retention policy to use on log segments. 'delete' + will discard old segments when retention time/size limits are reached. + 'compact' will enable log compaction, resulting in retention of the latest + value for each key. Known values are: "delete", "compact", and + "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the topic. + Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value is + 86400000. The delete_retention_ms specifies how long (in ms) to retain delete + tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is 60000. 
+ The file_delete_delay_ms specifies the time (in ms) to wait before deleting a + file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_messages specifies the number of messages to + accumulate on a log partition before messages are flushed to disk. + "flush_ms": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a + message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is 4096. The + index_interval_bytes specifies the number of bytes between entries being + added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum + amount of time (in ms) that a message will remain uncompacted. This is only + applicable if the logs are have compaction enabled. + "max_message_bytes": 1048588, # Optional. Default value is 1048588. + The max_messages_bytes specifies the largest record batch size (in bytes) + that can be sent to the server. This is calculated after compression if + compression is enabled. + "message_down_conversion_enable": True, # Optional. Default value is + True. The message_down_conversion_enable specifies whether down-conversion of + message formats is enabled to satisfy consumer requests. When 'false', the + broker will not perform conversion for consumers expecting older message + formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for + consume requests from these older clients. + "message_format_version": "3.0-IV1", # Optional. Default value is + "3.0-IV1". The message_format_version specifies the message format version + used by the broker to append messages to the logs. The value of this setting + is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. 
By + setting a particular message format version, all existing messages on disk + must be smaller or equal to the specified version. Known values are: "0.8.0", + "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", + "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", + "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", + "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", + "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", + "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default value + is "create_time". The message_timestamp_type specifies whether to use the + message create time or log append time as the timestamp on a message. Known + values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. + The min_cleanable_dirty_ratio specifies the frequency of log compaction (if + enabled) in relation to duplicates present in the logs. For example, at 0.5, + at most 50% of the log could be duplicates before compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. The + min_compaction_lag_ms specifies the minimum time (in ms) that a message will + remain uncompacted in the log. Only relevant if log compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. The + min_insync_replicas specifies the number of replicas that must ACK a write + for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. The + preallocate specifies whether a file should be preallocated on disk when + creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. 
+ "retention_ms": 604800000, # Optional. Default value is 604800000. + The retention_ms specifies the maximum amount of time (in ms) to keep a + message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is 209715200. + The segment_bytes specifies the maximum size of a single log file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is 604800000. The + segment_ms specifies the period of time after which the log will be forced to + roll if the segment file isn't full. This ensures that retention can delete + or compact old data. + }, + "partition_count": 0, # Optional. The number of partitions available for the + topic. On update, this value can only be increased. + "replication_factor": 0 # Optional. The number of nodes to replicate data + across the cluster. } + # response body for status code(s): 200 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. 
The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. 
By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. 
+ "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". 
+ } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -129011,34 +137660,163 @@ def update_eviction_policy( """ @overload - def update_eviction_policy( + def update_kafka_topic( self, database_cluster_uuid: str, - body: IO[bytes], + topic_name: str, + body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any, - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Configure the Eviction Policy for a Caching or Valkey Cluster. + """Update Topic for a Kafka Cluster. - To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request - to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. + To update a topic attached to a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str + :param body: Default value is None. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 200 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 
'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. 
The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. 
+ The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. 
The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -129053,21 +137831,29 @@ def update_eviction_policy( """ @distributed_trace - def update_eviction_policy( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: + def update_kafka_topic( + self, + database_cluster_uuid: str, + topic_name: str, + body: Optional[Union[JSON, IO[bytes]]] = None, + **kwargs: Any, + ) -> JSON: # pylint: disable=line-too-long - """Configure the Eviction Policy for a Caching or Valkey Cluster. + """Update Topic for a Kafka Cluster. - To configure an eviction policy for an existing Caching or Valkey cluster, send a PUT request - to ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. + To update a topic attached to a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. 
:type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -129075,18 +137861,222 @@ def update_eviction_policy( # JSON input template you can fill out and use as your body input. body = { - "eviction_policy": "str" # A string specifying the desired eviction policy - for a Caching or Valkey cluster. * ``noeviction``"" : Don't evict any data, - returns error when memory limit is reached. * ``allkeys_lru:`` Evict any key, - least recently used (LRU) first. * ``allkeys_random``"" : Evict keys in a random - order. * ``volatile_lru``"" : Evict keys with expiration only, least recently - used (LRU) first. * ``volatile_random``"" : Evict keys with expiration only in a - random order. * ``volatile_ttl``"" : Evict keys with expiration only, shortest - time-to-live (TTL) first. Required. Known values are: "noeviction", - "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and - "volatile_ttl". + "config": { + "cleanup_policy": "delete", # Optional. Default value is "delete". + The cleanup_policy sets the retention policy to use on log segments. 'delete' + will discard old segments when retention time/size limits are reached. + 'compact' will enable log compaction, resulting in retention of the latest + value for each key. Known values are: "delete", "compact", and + "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the topic. + Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value is + 86400000. The delete_retention_ms specifies how long (in ms) to retain delete + tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is 60000. 
+ The file_delete_delay_ms specifies the time (in ms) to wait before deleting a + file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_messages specifies the number of messages to + accumulate on a log partition before messages are flushed to disk. + "flush_ms": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a + message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is 4096. The + index_interval_bytes specifies the number of bytes between entries being + added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum + amount of time (in ms) that a message will remain uncompacted. This is only + applicable if the logs are have compaction enabled. + "max_message_bytes": 1048588, # Optional. Default value is 1048588. + The max_messages_bytes specifies the largest record batch size (in bytes) + that can be sent to the server. This is calculated after compression if + compression is enabled. + "message_down_conversion_enable": True, # Optional. Default value is + True. The message_down_conversion_enable specifies whether down-conversion of + message formats is enabled to satisfy consumer requests. When 'false', the + broker will not perform conversion for consumers expecting older message + formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for + consume requests from these older clients. + "message_format_version": "3.0-IV1", # Optional. Default value is + "3.0-IV1". The message_format_version specifies the message format version + used by the broker to append messages to the logs. The value of this setting + is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. 
By + setting a particular message format version, all existing messages on disk + must be smaller or equal to the specified version. Known values are: "0.8.0", + "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", + "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", + "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", + "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", + "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", + "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default value + is "create_time". The message_timestamp_type specifies whether to use the + message create time or log append time as the timestamp on a message. Known + values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. + The min_cleanable_dirty_ratio specifies the frequency of log compaction (if + enabled) in relation to duplicates present in the logs. For example, at 0.5, + at most 50% of the log could be duplicates before compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. The + min_compaction_lag_ms specifies the minimum time (in ms) that a message will + remain uncompacted in the log. Only relevant if log compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. The + min_insync_replicas specifies the number of replicas that must ACK a write + for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. The + preallocate specifies whether a file should be preallocated on disk when + creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. 
+ "retention_ms": 604800000, # Optional. Default value is 604800000. + The retention_ms specifies the maximum amount of time (in ms) to keep a + message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is 209715200. + The segment_bytes specifies the maximum size of a single log file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is 604800000. The + segment_ms specifies the period of time after which the log will be forced to + roll if the segment file isn't full. This ensures that retention can delete + or compact old data. + }, + "partition_count": 0, # Optional. The number of partitions available for the + topic. On update, this value can only be increased. + "replication_factor": 0 # Optional. The number of nodes to replicate data + across the cluster. } + # response body for status code(s): 200 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. 
The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. 
By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. 
+ "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000 # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". 
+ } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -129118,7 +138108,7 @@ def update_eviction_policy( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -129126,10 +138116,14 @@ def update_eviction_policy( if isinstance(body, (IOBase, bytes)): _content = body else: - _json = body + if body is not None: + _json = body + else: + _json = None - _request = build_databases_update_eviction_policy_request( + _request = build_databases_update_kafka_topic_request( database_cluster_uuid=database_cluster_uuid, + topic_name=topic_name, content_type=content_type, json=_json, content=_content, @@ -129147,15 +138141,14 @@ def update_eviction_policy( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -129166,6 +138159,11 @@ def update_eviction_policy( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -129183,34 +138181,34 @@ def update_eviction_policy( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: 
ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + def delete_kafka_topic( + self, database_cluster_uuid: str, topic_name: str, **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Retrieve the SQL Modes for a MySQL Cluster. + """Delete Topic for a Kafka Cluster. - To retrieve the configured SQL modes for an existing MySQL cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/sql_mode``. - The response will be a JSON object with a ``sql_mode`` key. This will be set to a string - representing the configured SQL modes. + To delete a single topic within a Kafka cluster, send a DELETE request + to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + A status of 204 will be given. This indicates that the request was + processed successfully, but that no response body is needed. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :return: JSON object - :rtype: JSON + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "sql_mode": "str" # A string specifying the configured SQL modes for the - MySQL cluster. Required. 
- } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -129239,10 +138237,11 @@ def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_get_sql_mode_request( + _request = build_databases_delete_kafka_topic_request( database_cluster_uuid=database_cluster_uuid, + topic_name=topic_name, headers=_headers, params=_params, ) @@ -129257,14 +138256,15 @@ def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -129275,11 +138275,6 @@ def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -129297,136 +138292,40 @@ def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @overload - def update_sql_mode( - self, 
- database_cluster_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Update SQL Mode for a Cluster. - - To configure the SQL modes for an existing MySQL cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 - documentation for a `full list of supported SQL modes - `_. - A successful request will receive a 204 No Content status code with no body in response. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "sql_mode": "str" # A string specifying the configured SQL modes for the - MySQL cluster. Required. - } - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ - - @overload - def update_sql_mode( - self, - database_cluster_uuid: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Update SQL Mode for a Cluster. - - To configure the SQL modes for an existing MySQL cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 - documentation for a `full list of supported SQL modes - `_. - A successful request will receive a 204 No Content status code with no body in response. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python + return cls(pipeline_response, deserialized, response_headers) # type: ignore - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ + return deserialized # type: ignore @distributed_trace - def update_sql_mode( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: + def list_logsink(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Update SQL Mode for a Cluster. + """List Logsinks for a Database Cluster. - To configure the SQL modes for an existing MySQL cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 - documentation for a `full list of supported SQL modes - `_. - A successful request will receive a 204 No Content status code with no body in response. + To list logsinks for a database cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/logsink``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "sql_mode": "str" # A string specifying the configured SQL modes for the - MySQL cluster. Required. + # response body for status code(s): 200 + response == { + "sinks": [ + { + "config": {}, + "sink_id": "str", # Optional. A unique identifier for + Logsink. + "sink_name": "str", # Optional. The name of the Logsink. + "sink_type": "str" # Optional. Known values are: "rsyslog", + "elasticsearch", and "opensearch". 
+ } + ] } - # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -129452,27 +138351,13 @@ def update_sql_mode( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_update_sql_mode_request( + _request = build_databases_list_logsink_request( database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, headers=_headers, params=_params, ) @@ -129487,15 +138372,14 @@ def update_sql_mode( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -129506,6 +138390,11 @@ def update_sql_mode( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -129523,25 +138412,24 @@ def 
update_sql_mode( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @overload - def update_major_version( + def create_logsink( self, database_cluster_uuid: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any, - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Upgrade Major Version for a Database. + """Create Logsink for a Database Cluster. - To upgrade the major version of a database, send a PUT request to - ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version. - A successful request will receive a 204 No Content status code with no body in response. + To create logsink for a database cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/logsink``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -129550,8 +138438,8 @@ def update_major_version( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -129559,10 +138447,26 @@ def update_major_version( # JSON input template you can fill out and use as your body input. body = { - "version": "str" # Optional. A string representing the version of the - database engine in use for the cluster. + "config": {}, + "sink_name": "str", # Optional. The name of the Logsink. + "sink_type": "str" # Optional. Type of logsink integration. * Use + ``datadog`` for Datadog integration **only with MongoDB clusters**. * For + non-MongoDB clusters, use ``rsyslog`` for general syslog forwarding. 
* Other + supported types include ``elasticsearch`` and ``opensearch``. More details about + the configuration can be found in the ``config`` property. Known values are: + "rsyslog", "elasticsearch", "opensearch", and "datadog". } + # response body for status code(s): 201 + response == { + "sink": { + "config": {}, + "sink_id": "str", # Optional. A unique identifier for Logsink. + "sink_name": "str", # Optional. The name of the Logsink. + "sink_type": "str" # Optional. Known values are: "rsyslog", + "elasticsearch", and "opensearch". + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -129577,20 +138481,19 @@ def update_major_version( """ @overload - def update_major_version( + def create_logsink( self, database_cluster_uuid: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any, - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Upgrade Major Version for a Database. + """Create Logsink for a Database Cluster. - To upgrade the major version of a database, send a PUT request to - ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version. - A successful request will receive a 204 No Content status code with no body in response. + To create logsink for a database cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/logsink``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -129599,13 +138502,23 @@ def update_major_version( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. 
code-block:: python + # response body for status code(s): 201 + response == { + "sink": { + "config": {}, + "sink_id": "str", # Optional. A unique identifier for Logsink. + "sink_name": "str", # Optional. The name of the Logsink. + "sink_type": "str" # Optional. Known values are: "rsyslog", + "elasticsearch", and "opensearch". + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -129620,22 +138533,21 @@ def update_major_version( """ @distributed_trace - def update_major_version( + def create_logsink( self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Upgrade Major Version for a Database. + """Create Logsink for a Database Cluster. - To upgrade the major version of a database, send a PUT request to - ``/v2/databases/$DATABASE_ID/upgrade``\\ , specifying the target version. - A successful request will receive a 204 No Content status code with no body in response. + To create logsink for a database cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/logsink``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -129643,10 +138555,26 @@ def update_major_version( # JSON input template you can fill out and use as your body input. body = { - "version": "str" # Optional. A string representing the version of the - database engine in use for the cluster. + "config": {}, + "sink_name": "str", # Optional. The name of the Logsink. + "sink_type": "str" # Optional. Type of logsink integration. * Use + ``datadog`` for Datadog integration **only with MongoDB clusters**. 
* For + non-MongoDB clusters, use ``rsyslog`` for general syslog forwarding. * Other + supported types include ``elasticsearch`` and ``opensearch``. More details about + the configuration can be found in the ``config`` property. Known values are: + "rsyslog", "elasticsearch", "opensearch", and "datadog". } + # response body for status code(s): 201 + response == { + "sink": { + "config": {}, + "sink_id": "str", # Optional. A unique identifier for Logsink. + "sink_name": "str", # Optional. The name of the Logsink. + "sink_type": "str" # Optional. Known values are: "rsyslog", + "elasticsearch", and "opensearch". + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -129678,7 +138606,7 @@ def update_major_version( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -129688,7 +138616,7 @@ def update_major_version( else: _json = body - _request = build_databases_update_major_version_request( + _request = build_databases_create_logsink_request( database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, @@ -129707,15 +138635,14 @@ def update_major_version( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [201, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -129726,6 +138653,11 @@ def 
update_major_version( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -129743,21 +138675,24 @@ def update_major_version( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_autoscale(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + def get_logsink( + self, database_cluster_uuid: str, logsink_id: str, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Retrieve Autoscale Configuration for a Database Cluster. + """Get Logsink for a Database Cluster. - To retrieve the autoscale configuration for an existing database cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/autoscale``. - The response will be a JSON object with autoscaling configuration details. + To get a logsink for a database cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param logsink_id: A unique identifier for a logsink of a database cluster. Required. + :type logsink_id: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -129767,17 +138702,11 @@ def get_autoscale(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "autoscale": { - "storage": { - "enabled": bool, # Whether storage autoscaling is enabled - for the cluster. Required. - "increment_gib": 0, # Optional. 
The amount of additional - storage to add (in GiB) when autoscaling is triggered. - "threshold_percent": 0 # Optional. The storage usage - threshold percentage that triggers autoscaling. When storage usage - exceeds this percentage, additional storage will be added automatically. - } - } + "config": {}, + "sink_id": "str", # Optional. A unique identifier for Logsink. + "sink_name": "str", # Optional. The name of the Logsink. + "sink_type": "str" # Optional. Known values are: "rsyslog", "elasticsearch", + and "opensearch". } # response body for status code(s): 404 response == { @@ -129809,8 +138738,9 @@ def get_autoscale(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_autoscale_request( + _request = build_databases_get_logsink_request( database_cluster_uuid=database_cluster_uuid, + logsink_id=logsink_id, headers=_headers, params=_params, ) @@ -129870,23 +138800,25 @@ def get_autoscale(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @overload - def update_autoscale( + def update_logsink( self, database_cluster_uuid: str, + logsink_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any, ) -> Optional[JSON]: # pylint: disable=line-too-long - """Configure Autoscale Settings for a Database Cluster. + """Update Logsink for a Database Cluster. - To configure autoscale settings for an existing database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration. - A successful request will receive a 204 No Content status code with no body in response. + To update a logsink for a database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
:type database_cluster_uuid: str + :param logsink_id: A unique identifier for a logsink of a database cluster. Required. + :type logsink_id: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -129901,18 +138833,10 @@ def update_autoscale( # JSON input template you can fill out and use as your body input. body = { - "storage": { - "enabled": bool, # Whether storage autoscaling is enabled for the - cluster. Required. - "increment_gib": 0, # Optional. The amount of additional storage to - add (in GiB) when autoscaling is triggered. - "threshold_percent": 0 # Optional. The storage usage threshold - percentage that triggers autoscaling. When storage usage exceeds this - percentage, additional storage will be added automatically. - } + "config": {} } - # response body for status code(s): 404, 422 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -129926,23 +138850,25 @@ def update_autoscale( """ @overload - def update_autoscale( + def update_logsink( self, database_cluster_uuid: str, + logsink_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any, ) -> Optional[JSON]: # pylint: disable=line-too-long - """Configure Autoscale Settings for a Database Cluster. + """Update Logsink for a Database Cluster. - To configure autoscale settings for an existing database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration. - A successful request will receive a 204 No Content status code with no body in response. + To update a logsink for a database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
:type database_cluster_uuid: str + :param logsink_id: A unique identifier for a logsink of a database cluster. Required. + :type logsink_id: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -129955,7 +138881,7 @@ def update_autoscale( Example: .. code-block:: python - # response body for status code(s): 404, 422 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -129969,18 +138895,23 @@ def update_autoscale( """ @distributed_trace - def update_autoscale( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + def update_logsink( + self, + database_cluster_uuid: str, + logsink_id: str, + body: Union[JSON, IO[bytes]], + **kwargs: Any, ) -> Optional[JSON]: # pylint: disable=line-too-long - """Configure Autoscale Settings for a Database Cluster. + """Update Logsink for a Database Cluster. - To configure autoscale settings for an existing database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/autoscale``\\ , specifying the autoscale configuration. - A successful request will receive a 204 No Content status code with no body in response. + To update a logsink for a database cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param logsink_id: A unique identifier for a logsink of a database cluster. Required. + :type logsink_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object or None @@ -129992,18 +138923,10 @@ def update_autoscale( # JSON input template you can fill out and use as your body input. 
body = { - "storage": { - "enabled": bool, # Whether storage autoscaling is enabled for the - cluster. Required. - "increment_gib": 0, # Optional. The amount of additional storage to - add (in GiB) when autoscaling is triggered. - "threshold_percent": 0 # Optional. The storage usage threshold - percentage that triggers autoscaling. When storage usage exceeds this - percentage, additional storage will be added automatically. - } + "config": {} } - # response body for status code(s): 404, 422 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -130044,8 +138967,9 @@ def update_autoscale( else: _json = body - _request = build_databases_update_autoscale_request( + _request = build_databases_update_logsink_request( database_cluster_uuid=database_cluster_uuid, + logsink_id=logsink_id, content_type=content_type, json=_json, content=_content, @@ -130063,7 +138987,7 @@ def update_autoscale( response = pipeline_response.http_response - if response.status_code not in [204, 404, 422]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -130071,7 +138995,7 @@ def update_autoscale( deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -130098,7 +139022,99 @@ def update_autoscale( else: deserialized = None - if response.status_code == 422: + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_logsink( + self, database_cluster_uuid: str, logsink_id: str, **kwargs: Any + ) -> 
Optional[JSON]: + # pylint: disable=line-too-long + """Delete Logsink for a Database Cluster. + + To delete a logsink for a database cluster, send a DELETE request to + ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param logsink_id: A unique identifier for a logsink of a database cluster. Required. + :type logsink_id: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_databases_delete_logsink_request( + database_cluster_uuid=database_cluster_uuid, + logsink_id=logsink_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -130120,14 +139136,12 @@ def update_autoscale( return deserialized # type: ignore @distributed_trace - def list_kafka_topics(self, database_cluster_uuid: str, **kwargs: Any) -> 
JSON: + def list_kafka_schemas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List Topics for a Kafka Cluster. - - To list all of a Kafka cluster's topics, send a GET request to - ``/v2/databases/$DATABASE_ID/topics``. + """List Schemas for Kafka Cluster. - The result will be a JSON object with a ``topics`` key. + To list all schemas for a Kafka cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/schema-registry``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str @@ -130140,15 +139154,15 @@ def list_kafka_topics(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "topics": [ + "subjects": [ { - "name": "str", # Optional. The name of the Kafka topic. - "partition_count": 0, # Optional. The number of partitions - available for the topic. On update, this value can only be increased. - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. - Known values are: "active", "configuring", "deleting", and "unknown". + "schema": "str", # Optional. The schema definition in the + specified format. + "schema_id": 0, # Optional. The id for schema. + "schema_type": "str", # Optional. The type of the schema. + Known values are: "AVRO", "JSON", and "PROTOBUF". + "subject_name": "str" # Optional. The name of the schema + subject. 
} ] } @@ -130182,7 +139196,7 @@ def list_kafka_topics(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_kafka_topics_request( + _request = build_databases_list_kafka_schemas_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -130243,25 +139257,23 @@ def list_kafka_topics(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @overload - def create_kafka_topic( + def create_kafka_schema( self, database_cluster_uuid: str, - body: Optional[JSON] = None, + body: JSON, *, content_type: str = "application/json", **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Create Topic for a Kafka Cluster. - - To create a topic attached to a Kafka cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/topics``. + """Create Schema Registry for Kafka Cluster. - The result will be a JSON object with a ``topic`` key. + To create a Kafka schema for a database cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/schema-registry``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Default value is None. + :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -130275,222 +139287,19 @@ def create_kafka_topic( # JSON input template you can fill out and use as your body input. body = { - "config": { - "cleanup_policy": "delete", # Optional. Default value is "delete". - The cleanup_policy sets the retention policy to use on log segments. 'delete' - will discard old segments when retention time/size limits are reached. - 'compact' will enable log compaction, resulting in retention of the latest - value for each key. Known values are: "delete", "compact", and - "compact_delete". 
- "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the topic. - Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value is - 86400000. The delete_retention_ms specifies how long (in ms) to retain delete - tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is 60000. - The file_delete_delay_ms specifies the time (in ms) to wait before deleting a - file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_messages specifies the number of messages to - accumulate on a log partition before messages are flushed to disk. - "flush_ms": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a - message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is 4096. The - index_interval_bytes specifies the number of bytes between entries being - added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum - amount of time (in ms) that a message will remain uncompacted. This is only - applicable if the logs are have compaction enabled. - "max_message_bytes": 1048588, # Optional. Default value is 1048588. - The max_messages_bytes specifies the largest record batch size (in bytes) - that can be sent to the server. This is calculated after compression if - compression is enabled. - "message_down_conversion_enable": True, # Optional. Default value is - True. The message_down_conversion_enable specifies whether down-conversion of - message formats is enabled to satisfy consumer requests. 
When 'false', the - broker will not perform conversion for consumers expecting older message - formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for - consume requests from these older clients. - "message_format_version": "3.0-IV1", # Optional. Default value is - "3.0-IV1". The message_format_version specifies the message format version - used by the broker to append messages to the logs. The value of this setting - is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By - setting a particular message format version, all existing messages on disk - must be smaller or equal to the specified version. Known values are: "0.8.0", - "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", - "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", - "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", - "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", - "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", - "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default value - is "create_time". The message_timestamp_type specifies whether to use the - message create time or log append time as the timestamp on a message. Known - values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. - The min_cleanable_dirty_ratio specifies the frequency of log compaction (if - enabled) in relation to duplicates present in the logs. For example, at 0.5, - at most 50% of the log could be duplicates before compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. The - min_compaction_lag_ms specifies the minimum time (in ms) that a message will - remain uncompacted in the log. Only relevant if log compaction is enabled. - "min_insync_replicas": 1, # Optional. 
Default value is 1. The - min_insync_replicas specifies the number of replicas that must ACK a write - for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. The - preallocate specifies whether a file should be preallocated on disk when - creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is 604800000. - The retention_ms specifies the maximum amount of time (in ms) to keep a - message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is 209715200. - The segment_bytes specifies the maximum size of a single log file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is 604800000. The - segment_ms specifies the period of time after which the log will be forced to - roll if the segment file isn't full. This ensures that retention can delete - or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partition_count": 0, # Optional. The number of partitions available for the - topic. On update, this value can only be increased. - "replication_factor": 0 # Optional. The number of nodes to replicate data - across the cluster. + "schema": "str", # Optional. The schema definition in the specified format. + "schema_type": "str", # Optional. The type of the schema. Known values are: + "AVRO", "JSON", and "PROTOBUF". + "subject_name": "str" # Optional. The name of the schema subject. } # response body for status code(s): 201 response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. 
Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. 
This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. 
- For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. 
The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } + "schema": "str", # Optional. The schema definition in the specified format. + "schema_id": 0, # Optional. The id for schema. + "schema_type": "str", # Optional. The type of the schema. Known values are: + "AVRO", "JSON", and "PROTOBUF". + "subject_name": "str" # Optional. The name of the schema subject. } # response body for status code(s): 404 response == { @@ -130506,417 +139315,91 @@ def create_kafka_topic( """ @overload - def create_kafka_topic( + def create_kafka_schema( self, database_cluster_uuid: str, - body: Optional[IO[bytes]] = None, + body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Create Topic for a Kafka Cluster. - - To create a topic attached to a Kafka cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/topics``. + """Create Schema Registry for Kafka Cluster. - The result will be a JSON object with a ``topic`` key. + To create a Kafka schema for a database cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/schema-registry``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Default value is None. + :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". 
- :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 201 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. 
The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". 
The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. 
This ensures - that retention can delete or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace - def create_kafka_topic( - self, - database_cluster_uuid: str, - body: Optional[Union[JSON, IO[bytes]]] = None, - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Create Topic for a Kafka Cluster. - - To create a topic attached to a Kafka cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/topics``. - - The result will be a JSON object with a ``topic`` key. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
- :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. - :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "config": { - "cleanup_policy": "delete", # Optional. Default value is "delete". - The cleanup_policy sets the retention policy to use on log segments. 'delete' - will discard old segments when retention time/size limits are reached. - 'compact' will enable log compaction, resulting in retention of the latest - value for each key. Known values are: "delete", "compact", and - "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the topic. - Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value is - 86400000. The delete_retention_ms specifies how long (in ms) to retain delete - tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is 60000. - The file_delete_delay_ms specifies the time (in ms) to wait before deleting a - file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_messages specifies the number of messages to - accumulate on a log partition before messages are flushed to disk. - "flush_ms": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a - message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is 4096. The - index_interval_bytes specifies the number of bytes between entries being - added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. 
Default - value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum - amount of time (in ms) that a message will remain uncompacted. This is only - applicable if the logs are have compaction enabled. - "max_message_bytes": 1048588, # Optional. Default value is 1048588. - The max_messages_bytes specifies the largest record batch size (in bytes) - that can be sent to the server. This is calculated after compression if - compression is enabled. - "message_down_conversion_enable": True, # Optional. Default value is - True. The message_down_conversion_enable specifies whether down-conversion of - message formats is enabled to satisfy consumer requests. When 'false', the - broker will not perform conversion for consumers expecting older message - formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for - consume requests from these older clients. - "message_format_version": "3.0-IV1", # Optional. Default value is - "3.0-IV1". The message_format_version specifies the message format version - used by the broker to append messages to the logs. The value of this setting - is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By - setting a particular message format version, all existing messages on disk - must be smaller or equal to the specified version. Known values are: "0.8.0", - "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", - "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", - "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", - "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", - "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", - "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default value - is "create_time". 
The message_timestamp_type specifies whether to use the - message create time or log append time as the timestamp on a message. Known - values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. - The min_cleanable_dirty_ratio specifies the frequency of log compaction (if - enabled) in relation to duplicates present in the logs. For example, at 0.5, - at most 50% of the log could be duplicates before compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. The - min_compaction_lag_ms specifies the minimum time (in ms) that a message will - remain uncompacted in the log. Only relevant if log compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. The - min_insync_replicas specifies the number of replicas that must ACK a write - for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. The - preallocate specifies whether a file should be preallocated on disk when - creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is 604800000. - The retention_ms specifies the maximum amount of time (in ms) to keep a - message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is 209715200. - The segment_bytes specifies the maximum size of a single log file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is 604800000. The - segment_ms specifies the period of time after which the log will be forced to - roll if the segment file isn't full. 
This ensures that retention can delete - or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partition_count": 0, # Optional. The number of partitions available for the - topic. On update, this value can only be increased. - "replication_factor": 0 # Optional. The number of nodes to replicate data - across the cluster. - } - - # response body for status code(s): 201 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. 
The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. 
Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. 
The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "schema": "str", # Optional. The schema definition in the specified format. + "schema_id": 0, # Optional. The id for schema. + "schema_type": "str", # Optional. 
The type of the schema. Known values are: + "AVRO", "JSON", and "PROTOBUF". + "subject_name": "str" # Optional. The name of the schema subject. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def create_kafka_schema( + self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create Schema Registry for Kafka Cluster. + + To create a Kafka schema for a database cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/schema-registry``. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "schema": "str", # Optional. The schema definition in the specified format. + "schema_type": "str", # Optional. The type of the schema. Known values are: + "AVRO", "JSON", and "PROTOBUF". + "subject_name": "str" # Optional. The name of the schema subject. + } + + # response body for status code(s): 201 + response == { + "schema": "str", # Optional. The schema definition in the specified format. + "schema_id": 0, # Optional. The id for schema. 
+ "schema_type": "str", # Optional. The type of the schema. Known values are: + "AVRO", "JSON", and "PROTOBUF". + "subject_name": "str" # Optional. The name of the schema subject. } # response body for status code(s): 404 response == { @@ -130957,12 +139440,9 @@ def create_kafka_topic( if isinstance(body, (IOBase, bytes)): _content = body else: - if body is not None: - _json = body - else: - _json = None + _json = body - _request = build_databases_create_kafka_topic_request( + _request = build_databases_create_kafka_schema_request( database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, @@ -131026,21 +139506,19 @@ def create_kafka_topic( return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_kafka_topic( - self, database_cluster_uuid: str, topic_name: str, **kwargs: Any + def get_kafka_schema( + self, database_cluster_uuid: str, subject_name: str, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Get Topic for a Kafka Cluster. - - To retrieve a given topic by name from the set of a Kafka cluster's topics, - send a GET request to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + """Get a Kafka Schema by Subject Name. - The result will be a JSON object with a ``topic`` key. + To get a specific schema by subject name for a Kafka cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str + :param subject_name: The name of the Kafka schema subject. Required. + :type subject_name: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -131050,127 +139528,12 @@ def get_kafka_topic( # response body for status code(s): 200 response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. 
Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. 
This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. 
- For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. 
The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } + "schema": "str", # Optional. The schema definition in the specified format. + "schema_id": 0, # Optional. The id for schema. + "schema_type": "str", # Optional. The type of the schema. Known values are: + "AVRO", "JSON", and "PROTOBUF". + "subject_name": "str", # Optional. The name of the schema subject. + "version": "str" # Optional. The version of the schema. } # response body for status code(s): 404 response == { @@ -131202,9 +139565,9 @@ def get_kafka_topic( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_kafka_topic_request( + _request = build_databases_get_kafka_schema_request( database_cluster_uuid=database_cluster_uuid, - topic_name=topic_name, + subject_name=subject_name, headers=_headers, params=_params, ) @@ -131263,258 +139626,27 @@ def get_kafka_topic( return cast(JSON, deserialized) # type: ignore - @overload - def update_kafka_topic( - self, - database_cluster_uuid: str, - topic_name: str, - body: Optional[JSON] = None, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: + @distributed_trace + def delete_kafka_schema( + self, database_cluster_uuid: str, subject_name: str, **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Update Topic for a Kafka Cluster. - - To update a topic attached to a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + """Delete a Kafka Schema by Subject Name. 
- The result will be a JSON object with a ``topic`` key. + To delete a specific schema by subject name for a Kafka cluster, send a DELETE request to + ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :param body: Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON + :param subject_name: The name of the Kafka schema subject. Required. + :type subject_name: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "config": { - "cleanup_policy": "delete", # Optional. Default value is "delete". - The cleanup_policy sets the retention policy to use on log segments. 'delete' - will discard old segments when retention time/size limits are reached. - 'compact' will enable log compaction, resulting in retention of the latest - value for each key. Known values are: "delete", "compact", and - "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the topic. - Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value is - 86400000. The delete_retention_ms specifies how long (in ms) to retain delete - tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is 60000. - The file_delete_delay_ms specifies the time (in ms) to wait before deleting a - file from the filesystem. 
- "flush_messages": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_messages specifies the number of messages to - accumulate on a log partition before messages are flushed to disk. - "flush_ms": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a - message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is 4096. The - index_interval_bytes specifies the number of bytes between entries being - added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum - amount of time (in ms) that a message will remain uncompacted. This is only - applicable if the logs are have compaction enabled. - "max_message_bytes": 1048588, # Optional. Default value is 1048588. - The max_messages_bytes specifies the largest record batch size (in bytes) - that can be sent to the server. This is calculated after compression if - compression is enabled. - "message_down_conversion_enable": True, # Optional. Default value is - True. The message_down_conversion_enable specifies whether down-conversion of - message formats is enabled to satisfy consumer requests. When 'false', the - broker will not perform conversion for consumers expecting older message - formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for - consume requests from these older clients. - "message_format_version": "3.0-IV1", # Optional. Default value is - "3.0-IV1". The message_format_version specifies the message format version - used by the broker to append messages to the logs. The value of this setting - is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By - setting a particular message format version, all existing messages on disk - must be smaller or equal to the specified version. 
Known values are: "0.8.0", - "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", - "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", - "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", - "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", - "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", - "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default value - is "create_time". The message_timestamp_type specifies whether to use the - message create time or log append time as the timestamp on a message. Known - values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. - The min_cleanable_dirty_ratio specifies the frequency of log compaction (if - enabled) in relation to duplicates present in the logs. For example, at 0.5, - at most 50% of the log could be duplicates before compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. The - min_compaction_lag_ms specifies the minimum time (in ms) that a message will - remain uncompacted in the log. Only relevant if log compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. The - min_insync_replicas specifies the number of replicas that must ACK a write - for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. The - preallocate specifies whether a file should be preallocated on disk when - creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is 604800000. 
- The retention_ms specifies the maximum amount of time (in ms) to keep a - message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is 209715200. - The segment_bytes specifies the maximum size of a single log file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is 604800000. The - segment_ms specifies the period of time after which the log will be forced to - roll if the segment file isn't full. This ensures that retention can delete - or compact old data. - }, - "partition_count": 0, # Optional. The number of partitions available for the - topic. On update, this value can only be increased. - "replication_factor": 0 # Optional. The number of nodes to replicate data - across the cluster. - } - - # response body for status code(s): 200 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. 
The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. 
By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. 
- "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -131527,34 +139659,97 @@ def update_kafka_topic( tickets to help identify the issue. 
} """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @overload - def update_kafka_topic( - self, - database_cluster_uuid: str, - topic_name: str, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", - **kwargs: Any, + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_databases_delete_kafka_schema_request( + database_cluster_uuid=database_cluster_uuid, + subject_name=subject_name, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", 
response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_kafka_schema_version( + self, database_cluster_uuid: str, subject_name: str, version: str, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update Topic for a Kafka Cluster. - - To update a topic attached to a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + """Get Kafka Schema by Subject Version. - The result will be a JSON object with a ``topic`` key. + To get a specific schema by subject name for a Kafka cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME/versions/$VERSION``. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :param body: Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str + :param subject_name: The name of the Kafka schema subject. Required. + :type subject_name: str + :param version: The version of the Kafka schema subject. Required. + :type version: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -131564,127 +139759,12 @@ def update_kafka_topic( # response body for status code(s): 200 response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. 
Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. 
This is calculated after - compression if compression is enabled. - "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. 
- For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. 
The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } + "schema": "str", # Optional. The schema definition in the specified format. + "schema_id": 0, # Optional. The id for schema. + "schema_type": "str", # Optional. The type of the schema. Known values are: + "AVRO", "JSON", and "PROTOBUF". + "subject_name": "str", # Optional. The name of the schema subject. + "version": "str" # Optional. The version of the schema. } # response body for status code(s): 404 response == { @@ -131698,253 +139778,112 @@ def update_kafka_topic( tickets to help identify the issue. 
} """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_databases_get_kafka_schema_version_request( + database_cluster_uuid=database_cluster_uuid, + subject_name=subject_name, + version=version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", 
response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore @distributed_trace - def update_kafka_topic( - self, - database_cluster_uuid: str, - topic_name: str, - body: Optional[Union[JSON, IO[bytes]]] = None, - **kwargs: Any, + def get_kafka_schema_config( + self, database_cluster_uuid: str, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update Topic for a Kafka Cluster. - - To update a topic attached to a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + """Retrieve Schema Registry Configuration for a kafka Cluster. - The result will be a JSON object with a ``topic`` key. + To retrieve the Schema Registry configuration for a Kafka cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/schema-registry/config``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. - :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "config": { - "cleanup_policy": "delete", # Optional. Default value is "delete". - The cleanup_policy sets the retention policy to use on log segments. 
'delete' - will discard old segments when retention time/size limits are reached. - 'compact' will enable log compaction, resulting in retention of the latest - value for each key. Known values are: "delete", "compact", and - "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the topic. - Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value is - 86400000. The delete_retention_ms specifies how long (in ms) to retain delete - tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is 60000. - The file_delete_delay_ms specifies the time (in ms) to wait before deleting a - file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_messages specifies the number of messages to - accumulate on a log partition before messages are flushed to disk. - "flush_ms": 9223372036854776000, # Optional. Default value is - 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a - message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is 4096. The - index_interval_bytes specifies the number of bytes between entries being - added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum - amount of time (in ms) that a message will remain uncompacted. This is only - applicable if the logs are have compaction enabled. - "max_message_bytes": 1048588, # Optional. Default value is 1048588. - The max_messages_bytes specifies the largest record batch size (in bytes) - that can be sent to the server. This is calculated after compression if - compression is enabled. - "message_down_conversion_enable": True, # Optional. 
Default value is - True. The message_down_conversion_enable specifies whether down-conversion of - message formats is enabled to satisfy consumer requests. When 'false', the - broker will not perform conversion for consumers expecting older message - formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for - consume requests from these older clients. - "message_format_version": "3.0-IV1", # Optional. Default value is - "3.0-IV1". The message_format_version specifies the message format version - used by the broker to append messages to the logs. The value of this setting - is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By - setting a particular message format version, all existing messages on disk - must be smaller or equal to the specified version. Known values are: "0.8.0", - "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", - "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", - "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", - "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", - "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", - "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default value - is "create_time". The message_timestamp_type specifies whether to use the - message create time or log append time as the timestamp on a message. Known - values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. - The min_cleanable_dirty_ratio specifies the frequency of log compaction (if - enabled) in relation to duplicates present in the logs. For example, at 0.5, - at most 50% of the log could be duplicates before compaction would begin. - "min_compaction_lag_ms": 0, # Optional. Default value is 0. 
The - min_compaction_lag_ms specifies the minimum time (in ms) that a message will - remain uncompacted in the log. Only relevant if log compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. The - min_insync_replicas specifies the number of replicas that must ACK a write - for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. The - preallocate specifies whether a file should be preallocated on disk when - creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is 604800000. - The retention_ms specifies the maximum amount of time (in ms) to keep a - message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is 209715200. - The segment_bytes specifies the maximum size of a single log file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is 604800000. The - segment_ms specifies the period of time after which the log will be forced to - roll if the segment file isn't full. This ensures that retention can delete - or compact old data. - }, - "partition_count": 0, # Optional. The number of partitions available for the - topic. On update, this value can only be increased. - "replication_factor": 0 # Optional. The number of nodes to replicate data - across the cluster. - } - - # response body for status code(s): 200 - response == { - "topic": { - "config": { - "cleanup_policy": "delete", # Optional. Default value is - "delete". The cleanup_policy sets the retention policy to use on log - segments. 
'delete' will discard old segments when retention time/size - limits are reached. 'compact' will enable log compaction, resulting in - retention of the latest value for each key. Known values are: "delete", - "compact", and "compact_delete". - "compression_type": "producer", # Optional. Default value is - "producer". The compression_type specifies the compression type of the - topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and - "uncompressed". - "delete_retention_ms": 86400000, # Optional. Default value - is 86400000. The delete_retention_ms specifies how long (in ms) to retain - delete tombstone markers for topics. - "file_delete_delay_ms": 60000, # Optional. Default value is - 60000. The file_delete_delay_ms specifies the time (in ms) to wait before - deleting a file from the filesystem. - "flush_messages": 9223372036854776000, # Optional. Default - value is 9223372036854776000. The flush_messages specifies the number of - messages to accumulate on a log partition before messages are flushed to - disk. - "flush_ms": 9223372036854776000, # Optional. Default value - is 9223372036854776000. The flush_ms specifies the maximum time (in ms) - that a message is kept in memory before being flushed to disk. - "index_interval_bytes": 4096, # Optional. Default value is - 4096. The index_interval_bytes specifies the number of bytes between - entries being added into te offset index. - "max_compaction_lag_ms": 9223372036854776000, # Optional. - Default value is 9223372036854776000. The max_compaction_lag_ms specifies - the maximum amount of time (in ms) that a message will remain - uncompacted. This is only applicable if the logs are have compaction - enabled. - "max_message_bytes": 1048588, # Optional. Default value is - 1048588. The max_messages_bytes specifies the largest record batch size - (in bytes) that can be sent to the server. This is calculated after - compression if compression is enabled. 
- "message_down_conversion_enable": True, # Optional. Default - value is True. The message_down_conversion_enable specifies whether - down-conversion of message formats is enabled to satisfy consumer - requests. When 'false', the broker will not perform conversion for - consumers expecting older message formats. The broker will respond with - an ``UNSUPPORTED_VERSION`` error for consume requests from these older - clients. - "message_format_version": "3.0-IV1", # Optional. Default - value is "3.0-IV1". The message_format_version specifies the message - format version used by the broker to append messages to the logs. The - value of this setting is assumed to be 3.0-IV1 if the broker protocol - version is 3.0 or higher. By setting a particular message format - version, all existing messages on disk must be smaller or equal to the - specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", - "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", - "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", - "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", - "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", - "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", - "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", - "3.3-IV1", "3.3-IV2", and "3.3-IV3". - "message_timestamp_type": "create_time", # Optional. Default - value is "create_time". The message_timestamp_type specifies whether to - use the message create time or log append time as the timestamp on a - message. Known values are: "create_time" and "log_append_time". - "min_cleanable_dirty_ratio": 0.5, # Optional. Default value - is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log - compaction (if enabled) in relation to duplicates present in the logs. - For example, at 0.5, at most 50% of the log could be duplicates before - compaction would begin. - "min_compaction_lag_ms": 0, # Optional. 
Default value is 0. - The min_compaction_lag_ms specifies the minimum time (in ms) that a - message will remain uncompacted in the log. Only relevant if log - compaction is enabled. - "min_insync_replicas": 1, # Optional. Default value is 1. - The min_insync_replicas specifies the number of replicas that must ACK a - write for the write to be considered successful. - "preallocate": False, # Optional. Default value is False. - The preallocate specifies whether a file should be preallocated on disk - when creating a new log segment. - "retention_bytes": -1, # Optional. Default value is -1. The - retention_bytes specifies the maximum size of the log (in bytes) before - deleting messages. -1 indicates that there is no limit. - "retention_ms": 604800000, # Optional. Default value is - 604800000. The retention_ms specifies the maximum amount of time (in ms) - to keep a message before deleting it. - "segment_bytes": 209715200, # Optional. Default value is - 209715200. The segment_bytes specifies the maximum size of a single log - file (in bytes). - "segment_jitter_ms": 0, # Optional. Default value is 0. The - segment_jitter_ms specifies the maximum random jitter subtracted from the - scheduled segment roll time to avoid thundering herds of segment rolling. - "segment_ms": 604800000 # Optional. Default value is - 604800000. The segment_ms specifies the period of time after which the - log will be forced to roll if the segment file isn't full. This ensures - that retention can delete or compact old data. - }, - "name": "str", # Optional. The name of the Kafka topic. - "partitions": [ - { - "consumer_groups": [ - { - "group_name": "str", # Optional. - Name of the consumer group. - "offset": 0 # Optional. The current - offset of the consumer group. - } - ], - "earliest_offset": 0, # Optional. The earliest - consumer offset amongst consumer groups. - "id": 0, # Optional. An identifier for the - partition. - "in_sync_replicas": 0, # Optional. 
The number of - nodes that are in-sync (have the latest data) for the given - partition. - "size": 0 # Optional. Size of the topic partition in - bytes. - } - ], - "replication_factor": 0, # Optional. The number of nodes to - replicate data across the cluster. - "state": "str" # Optional. The state of the Kafka topic. Known - values are: "active", "configuring", "deleting", and "unknown". - } + + # response body for status code(s): 200 + response == { + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". } # response body for status code(s): 404 response == { @@ -131971,31 +139910,13 @@ def update_kafka_topic( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[JSON] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None - - _request = build_databases_update_kafka_topic_request( + _request = build_databases_get_kafka_schema_config_request( database_cluster_uuid=database_cluster_uuid, - topic_name=topic_name, - content_type=content_type, - json=_json, - content=_content, headers=_headers, params=_params, ) @@ -132054,30 +139975,152 @@ def update_kafka_topic( return cast(JSON, deserialized) # type: ignore - @distributed_trace - def delete_kafka_topic( - self, database_cluster_uuid: str, topic_name: str, **kwargs: Any - ) -> Optional[JSON]: + @overload + def update_kafka_schema_config( + self, + database_cluster_uuid: str, + body: Optional[JSON] = None, + *, + 
content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: # pylint: disable=line-too-long - """Delete Topic for a Kafka Cluster. + """Update Schema Registry Configuration for a kafka Cluster. - To delete a single topic within a Kafka cluster, send a DELETE request - to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + To update the Schema Registry configuration for a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/schema-registry/config``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. - A status of 204 will be given. This indicates that the request was - processed successfully, but that no response body is needed. + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + } + + # response body for status code(s): 200 + response == { + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. 
For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def update_kafka_schema_config( + self, + database_cluster_uuid: str, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Update Schema Registry Configuration for a kafka Cluster. + + To update the Schema Registry configuration for a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/schema-registry/config``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param topic_name: The name used to identify the Kafka topic. Required. - :type topic_name: str - :return: JSON object or None - :rtype: JSON or None + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". 
+ } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def update_kafka_schema_config( + self, + database_cluster_uuid: str, + body: Optional[Union[JSON, IO[bytes]]] = None, + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Update Schema Registry Configuration for a kafka Cluster. + + To update the Schema Registry configuration for a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/schema-registry/config``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + } + + # response body for status code(s): 200 + response == { + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. 
Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -132103,14 +140146,30 @@ def delete_kafka_topic( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_delete_kafka_topic_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_databases_update_kafka_schema_config_request( database_cluster_uuid=database_cluster_uuid, - topic_name=topic_name, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -132125,15 +140184,14 @@ def delete_kafka_topic( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -132144,6 +140202,11 @@ def delete_kafka_topic( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = 
response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -132161,20 +140224,27 @@ def delete_kafka_topic( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace - def list_logsink(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + def get_kafka_schema_subject_config( + self, database_cluster_uuid: str, subject_name: str, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """List Logsinks for a Database Cluster. + """Retrieve Schema Registry Configuration for a Subject of kafka Cluster. - To list logsinks for a database cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/logsink``. + To retrieve the Schema Registry configuration for a Subject of a Kafka cluster, send a GET + request to + ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param subject_name: The name of the Kafka schema subject. Required. + :type subject_name: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -132184,16 +140254,10 @@ def list_logsink(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "sinks": [ - { - "config": {}, - "sink_id": "str", # Optional. A unique identifier for - Logsink. - "sink_name": "str", # Optional. The name of the Logsink. - "sink_type": "str" # Optional. 
Known values are: "rsyslog", - "elasticsearch", and "opensearch". - } - ] + "compatibility_level": "str", # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "subject_name": "str" # The name of the schema subject. Required. } # response body for status code(s): 404 response == { @@ -132225,8 +140289,9 @@ def list_logsink(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_logsink_request( + _request = build_databases_get_kafka_schema_subject_config_request( database_cluster_uuid=database_cluster_uuid, + subject_name=subject_name, headers=_headers, params=_params, ) @@ -132286,23 +140351,29 @@ def list_logsink(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @overload - def create_logsink( + def update_kafka_schema_subject_config( self, database_cluster_uuid: str, - body: JSON, + subject_name: str, + body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Create Logsink for a Database Cluster. + """Update Schema Registry Configuration for a Subject of kafka Cluster. - To create logsink for a database cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/logsink``. + To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT + request to + ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. + :param subject_name: The name of the Kafka schema subject. Required. 
+ :type subject_name: str + :param body: Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -132316,25 +140387,17 @@ def create_logsink( # JSON input template you can fill out and use as your body input. body = { - "config": {}, - "sink_name": "str", # Optional. The name of the Logsink. - "sink_type": "str" # Optional. Type of logsink integration. * Use - ``datadog`` for Datadog integration **only with MongoDB clusters**. * For - non-MongoDB clusters, use ``rsyslog`` for general syslog forwarding. * Other - supported types include ``elasticsearch`` and ``opensearch``. More details about - the configuration can be found in the ``config`` property. Known values are: - "rsyslog", "elasticsearch", "opensearch", and "datadog". + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". } - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "sink": { - "config": {}, - "sink_id": "str", # Optional. A unique identifier for Logsink. - "sink_name": "str", # Optional. The name of the Logsink. - "sink_type": "str" # Optional. Known values are: "rsyslog", - "elasticsearch", and "opensearch". - } + "compatibility_level": "str", # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "subject_name": "str" # The name of the schema subject. Required. 
} # response body for status code(s): 404 response == { @@ -132350,23 +140413,29 @@ def create_logsink( """ @overload - def create_logsink( + def update_kafka_schema_subject_config( self, database_cluster_uuid: str, - body: IO[bytes], + subject_name: str, + body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Create Logsink for a Database Cluster. + """Update Schema Registry Configuration for a Subject of kafka Cluster. - To create logsink for a database cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/logsink``. + To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT + request to + ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. + :param subject_name: The name of the Kafka schema subject. Required. + :type subject_name: str + :param body: Default value is None. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". @@ -132378,15 +140447,12 @@ def create_logsink( Example: .. code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "sink": { - "config": {}, - "sink_id": "str", # Optional. A unique identifier for Logsink. - "sink_name": "str", # Optional. The name of the Logsink. - "sink_type": "str" # Optional. Known values are: "rsyslog", - "elasticsearch", and "opensearch". - } + "compatibility_level": "str", # The compatibility level of the schema + registry. Required. 
Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "subject_name": "str" # The name of the schema subject. Required. } # response body for status code(s): 404 response == { @@ -132402,18 +140468,27 @@ def create_logsink( """ @distributed_trace - def create_logsink( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + def update_kafka_schema_subject_config( + self, + database_cluster_uuid: str, + subject_name: str, + body: Optional[Union[JSON, IO[bytes]]] = None, + **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Create Logsink for a Database Cluster. + """Update Schema Registry Configuration for a Subject of kafka Cluster. - To create logsink for a database cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/logsink``. + To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT + request to + ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. + The response is a JSON object with a ``compatibility_level`` key, which is set to an object + containing any database configuration parameters. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. + :param subject_name: The name of the Kafka schema subject. Required. + :type subject_name: str + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON @@ -132424,25 +140499,17 @@ def create_logsink( # JSON input template you can fill out and use as your body input. body = { - "config": {}, - "sink_name": "str", # Optional. The name of the Logsink. - "sink_type": "str" # Optional. Type of logsink integration. * Use - ``datadog`` for Datadog integration **only with MongoDB clusters**. 
* For - non-MongoDB clusters, use ``rsyslog`` for general syslog forwarding. * Other - supported types include ``elasticsearch`` and ``opensearch``. More details about - the configuration can be found in the ``config`` property. Known values are: - "rsyslog", "elasticsearch", "opensearch", and "datadog". + "compatibility_level": "str" # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". } - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "sink": { - "config": {}, - "sink_id": "str", # Optional. A unique identifier for Logsink. - "sink_name": "str", # Optional. The name of the Logsink. - "sink_type": "str" # Optional. Known values are: "rsyslog", - "elasticsearch", and "opensearch". - } + "compatibility_level": "str", # The compatibility level of the schema + registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", + "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "subject_name": "str" # The name of the schema subject. Required. 
} # response body for status code(s): 404 response == { @@ -132483,10 +140550,14 @@ def create_logsink( if isinstance(body, (IOBase, bytes)): _content = body else: - _json = body + if body is not None: + _json = body + else: + _json = None - _request = build_databases_create_logsink_request( + _request = build_databases_update_kafka_schema_subject_config_request( database_cluster_uuid=database_cluster_uuid, + subject_name=subject_name, content_type=content_type, json=_json, content=_content, @@ -132504,14 +140575,14 @@ def create_logsink( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 201: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -132549,19 +140620,14 @@ def create_logsink( return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_logsink( - self, database_cluster_uuid: str, logsink_id: str, **kwargs: Any - ) -> JSON: + def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Get Logsink for a Database Cluster. + """Retrieve Database Clusters' Metrics Endpoint Credentials. - To get a logsink for a database cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. + To show the credentials for all database clusters' metrics endpoints, send a GET request to + ``/v2/databases/metrics/credentials``. The result will be a JSON object with a ``credentials`` + key. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
- :type database_cluster_uuid: str - :param logsink_id: A unique identifier for a logsink of a database cluster. Required. - :type logsink_id: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -132571,11 +140637,14 @@ def get_logsink( # response body for status code(s): 200 response == { - "config": {}, - "sink_id": "str", # Optional. A unique identifier for Logsink. - "sink_name": "str", # Optional. The name of the Logsink. - "sink_type": "str" # Optional. Known values are: "rsyslog", "elasticsearch", - and "opensearch". + "credentials": { + "credentials": { + "basic_auth_password": "str", # Optional. basic + authentication password for metrics HTTP endpoint. + "basic_auth_username": "str" # Optional. basic + authentication username for metrics HTTP endpoint. + } + } } # response body for status code(s): 404 response == { @@ -132607,9 +140676,7 @@ def get_logsink( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_logsink_request( - database_cluster_uuid=database_cluster_uuid, - logsink_id=logsink_id, + _request = build_databases_get_cluster_metrics_credentials_request( headers=_headers, params=_params, ) @@ -132669,32 +140736,26 @@ def get_logsink( return cast(JSON, deserialized) # type: ignore @overload - def update_logsink( + def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements self, - database_cluster_uuid: str, - logsink_id: str, - body: JSON, + body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any, - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Update Logsink for a Database Cluster. + ) -> None: + """Update Database Clusters' Metrics Endpoint Credentials. - To update a logsink for a database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. + To update the credentials for all database clusters' metrics endpoints, send a PUT request to + ``/v2/databases/metrics/credentials``. 
A successful request will receive a 204 No Content + status code with no body in response. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param logsink_id: A unique identifier for a logsink of a database cluster. Required. - :type logsink_id: str - :param body: Required. + :param body: Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: None + :rtype: None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -132702,99 +140763,179 @@ def update_logsink( # JSON input template you can fill out and use as your body input. body = { - "config": {} - } - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. + "credentials": { + "basic_auth_password": "str", # Optional. basic authentication + password for metrics HTTP endpoint. + "basic_auth_username": "str" # Optional. basic authentication + username for metrics HTTP endpoint. 
+ } } """ @overload - def update_logsink( + def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements self, - database_cluster_uuid: str, - logsink_id: str, - body: IO[bytes], + body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any, - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Update Logsink for a Database Cluster. + ) -> None: + """Update Database Clusters' Metrics Endpoint Credentials. - To update a logsink for a database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. + To update the credentials for all database clusters' metrics endpoints, send a PUT request to + ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content + status code with no body in response. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param logsink_id: A unique identifier for a logsink of a database cluster. Required. - :type logsink_id: str - :param body: Required. + :param body: Default value is None. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> None: + """Update Database Clusters' Metrics Endpoint Credentials. + + To update the credentials for all database clusters' metrics endpoints, send a PUT request to + ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content + status code with no body in response. + + :param body: Is either a JSON type or a IO[bytes] type. 
Default value is None. + :type body: JSON or IO[bytes] + :return: None + :rtype: None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. + # JSON input template you can fill out and use as your body input. + body = { + "credentials": { + "basic_auth_password": "str", # Optional. basic authentication + password for metrics HTTP endpoint. + "basic_auth_username": "str" # Optional. basic authentication + username for metrics HTTP endpoint. 
+ } } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_databases_update_cluster_metrics_credentials_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if cls: + return cls(pipeline_response, 
None, response_headers) # type: ignore @distributed_trace - def update_logsink( - self, - database_cluster_uuid: str, - logsink_id: str, - body: Union[JSON, IO[bytes]], - **kwargs: Any, - ) -> Optional[JSON]: + def list_opeasearch_indexes( + self, database_cluster_uuid: str, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Update Logsink for a Database Cluster. + """List Indexes for a OpenSearch Cluster. - To update a logsink for a database cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. + To list all of a OpenSearch cluster's indexes, send a GET request to + ``/v2/databases/$DATABASE_ID/indexes``. + + The result will be a JSON object with a ``indexes`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param logsink_id: A unique identifier for a logsink of a database cluster. Required. - :type logsink_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "config": {} + # response body for status code(s): 200 + response == { + "indexes": [ + { + "created_time": "2020-02-20 00:00:00", # Optional. The date + and time the index was created. + "health": "str", # Optional. The health of the OpenSearch + index. Known values are: "unknown", "green", "yellow", "red", and "red*". + "index_name": "str", # Optional. The name of the opensearch + index. + "number_of_replicas": 0, # Optional. The number of replicas + for the index. + "number_of_shards": 0, # Optional. The number of shards for + the index. + "size": 0, # Optional. The size of the index. + "status": "str" # Optional. The status of the OpenSearch + index. 
Known values are: "unknown", "open", "close", and "none". + } + ] } - # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -132820,28 +140961,13 @@ def update_logsink( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _json = body + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_update_logsink_request( + _request = build_databases_list_opeasearch_indexes_request( database_cluster_uuid=database_cluster_uuid, - logsink_id=logsink_id, - content_type=content_type, - json=_json, - content=_content, headers=_headers, params=_params, ) @@ -132862,7 +140988,6 @@ def update_logsink( map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( @@ -132875,6 +141000,11 @@ def update_logsink( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -132892,24 +141022,27 @@ def update_logsink( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - 
return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace - def delete_logsink( - self, database_cluster_uuid: str, logsink_id: str, **kwargs: Any + def delete_opensearch_index( + self, database_cluster_uuid: str, index_name: str, **kwargs: Any ) -> Optional[JSON]: # pylint: disable=line-too-long - """Delete Logsink for a Database Cluster. + """Delete Index for OpenSearch Cluster. - To delete a logsink for a database cluster, send a DELETE request to - ``/v2/databases/$DATABASE_ID/logsink/$LOGSINK_ID``. + To delete a single index within OpenSearch cluster, send a DELETE request + to ``/v2/databases/$DATABASE_ID/indexes/$INDEX_NAME``. + + A status of 204 will be given. This indicates that the request was + processed successfully, but that no response body is needed. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param logsink_id: A unique identifier for a logsink of a database cluster. Required. - :type logsink_id: str + :param index_name: The name of the OpenSearch index. Required. 
+ :type index_name: str :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -132947,9 +141080,9 @@ def delete_logsink( cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_delete_logsink_request( + _request = build_databases_delete_opensearch_index_request( database_cluster_uuid=database_cluster_uuid, - logsink_id=logsink_id, + index_name=index_name, headers=_headers, params=_params, ) @@ -132964,7 +141097,7 @@ def delete_logsink( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -132972,7 +141105,7 @@ def delete_logsink( deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -133004,16 +141137,38 @@ def delete_logsink( return deserialized # type: ignore + +class DedicatedInferencesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~pydo.GeneratedClient`'s + :attr:`dedicated_inferences` attribute. 
+ """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) + @distributed_trace - def list_kafka_schemas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + def get(self, dedicated_inference_id: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List Schemas for Kafka Cluster. + """Get a Dedicated Inference. - To list all schemas for a Kafka cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/schema-registry``. + Retrieve an existing Dedicated Inference by ID. Send a GET request to + ``/v2/dedicated-inferences/{dedicated_inference_id}``. The status in the response + is one of active, new, provisioning, updating, deleting, or error. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -133023,17 +141178,109 @@ def list_kafka_schemas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "subjects": [ - { - "schema": "str", # Optional. The schema definition in the - specified format. - "schema_id": 0, # Optional. The id for schema. - "schema_type": "str", # Optional. The type of the schema. - Known values are: "AVRO", "JSON", and "PROTOBUF". - "subject_name": "str" # Optional. The name of the schema - subject. - } - ] + "dedicated_inference": { + "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was created. 
+ "endpoints": { + "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN + of the Dedicated Inference instance. + "public_endpoint_fqdn": "str" # Optional. Public FQDN of the + Dedicated Inference instance. + }, + "id": "str", # Optional. Unique ID of the Dedicated Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether to + expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated Inference. + Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the Dedicated + Inference is hosted. + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public + LLM endpoint. Required. 
+ "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be + unique within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated + Inference is hosted. Required. Known values are: "atl1", "nyc2", and + "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". + "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference. 
+ } } # response body for status code(s): 404 response == { @@ -133065,8 +141312,8 @@ def list_kafka_schemas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_kafka_schemas_request( - database_cluster_uuid=database_cluster_uuid, + _request = build_dedicated_inferences_get_request( + dedicated_inference_id=dedicated_inference_id, headers=_headers, params=_params, ) @@ -133126,22 +141373,24 @@ def list_kafka_schemas(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @overload - def create_kafka_schema( + def patch( self, - database_cluster_uuid: str, + dedicated_inference_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Create Schema Registry for Kafka Cluster. + """Update a Dedicated Inference. - To create a Kafka schema for a database cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/schema-registry``. + Update an existing Dedicated Inference. Send a PATCH request to + ``/v2/dedicated-inferences/{dedicated_inference_id}`` with updated ``spec`` and/or + ``access_tokens``. Status will move to updating and return to active when done. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -133156,19 +141405,155 @@ def create_kafka_schema( # JSON input template you can fill out and use as your body input. body = { - "schema": "str", # Optional. The schema definition in the specified format. - "schema_type": "str", # Optional. The type of the schema. Known values are: - "AVRO", "JSON", and "PROTOBUF". 
- "subject_name": "str" # Optional. The name of the schema subject. + "access_tokens": { + "hugging_face_token": "str" # Optional. Hugging Face token required + for gated models. + }, + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public LLM + endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of accelerator + instances. Required. + "type": "str", # Accelerator type + (e.g. prefill_decode). Required. + "status": "str" # Optional. Current + state of the Accelerator. Known values are: "new", + "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to identify an + existing deployment when updating; empty means create new. + "model_provider": "str", # Optional. Model provider. + "hugging_face" + "model_slug": "str", # Optional. Model identifier + (e.g. Hugging Face slug). + "workload_config": {} # Optional. Workload-specific + configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be unique + within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated Inference + is hosted. Required. Known values are: "atl1", "nyc2", and "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated Inference. + Required. + } + } } - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "schema": "str", # Optional. The schema definition in the specified format. - "schema_id": 0, # Optional. The id for schema. - "schema_type": "str", # Optional. The type of the schema. Known values are: - "AVRO", "JSON", and "PROTOBUF". - "subject_name": "str" # Optional. The name of the schema subject. + "dedicated_inference": { + "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. 
Private VPC FQDN + of the Dedicated Inference instance. + "public_endpoint_fqdn": "str" # Optional. Public FQDN of the + Dedicated Inference instance. + }, + "id": "str", # Optional. Unique ID of the Dedicated Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether to + expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated Inference. + Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the Dedicated + Inference is hosted. + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public + LLM endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. 
+ "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be + unique within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated + Inference is hosted. Required. Known values are: "atl1", "nyc2", and + "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". + "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference. + } } # response body for status code(s): 404 response == { @@ -133184,22 +141569,24 @@ def create_kafka_schema( """ @overload - def create_kafka_schema( + def patch( self, - database_cluster_uuid: str, + dedicated_inference_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Create Schema Registry for Kafka Cluster. + """Update a Dedicated Inference. - To create a Kafka schema for a database cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/schema-registry``. + Update an existing Dedicated Inference. 
Send a PATCH request to + ``/v2/dedicated-inferences/{dedicated_inference_id}`` with updated ``spec`` and/or + ``access_tokens``. Status will move to updating and return to active when done. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -133212,13 +141599,111 @@ def create_kafka_schema( Example: .. code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "schema": "str", # Optional. The schema definition in the specified format. - "schema_id": 0, # Optional. The id for schema. - "schema_type": "str", # Optional. The type of the schema. Known values are: - "AVRO", "JSON", and "PROTOBUF". - "subject_name": "str" # Optional. The name of the schema subject. + "dedicated_inference": { + "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN + of the Dedicated Inference instance. + "public_endpoint_fqdn": "str" # Optional. Public FQDN of the + Dedicated Inference instance. + }, + "id": "str", # Optional. Unique ID of the Dedicated Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether to + expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. 
prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated Inference. + Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the Dedicated + Inference is hosted. + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public + LLM endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be + unique within the team. Required. 
+ "region": "str", # DigitalOcean region where the Dedicated + Inference is hosted. Required. Known values are: "atl1", "nyc2", and + "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". + "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference. + } } # response body for status code(s): 404 response == { @@ -133234,17 +141719,19 @@ def create_kafka_schema( """ @distributed_trace - def create_kafka_schema( - self, database_cluster_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + def patch( + self, dedicated_inference_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create Schema Registry for Kafka Cluster. + """Update a Dedicated Inference. - To create a Kafka schema for a database cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/schema-registry``. + Update an existing Dedicated Inference. Send a PATCH request to + ``/v2/dedicated-inferences/{dedicated_inference_id}`` with updated ``spec`` and/or + ``access_tokens``. Status will move to updating and return to active when done. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object @@ -133256,19 +141743,155 @@ def create_kafka_schema( # JSON input template you can fill out and use as your body input. body = { - "schema": "str", # Optional. 
The schema definition in the specified format. - "schema_type": "str", # Optional. The type of the schema. Known values are: - "AVRO", "JSON", and "PROTOBUF". - "subject_name": "str" # Optional. The name of the schema subject. + "access_tokens": { + "hugging_face_token": "str" # Optional. Hugging Face token required + for gated models. + }, + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public LLM + endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of accelerator + instances. Required. + "type": "str", # Accelerator type + (e.g. prefill_decode). Required. + "status": "str" # Optional. Current + state of the Accelerator. Known values are: "new", + "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to identify an + existing deployment when updating; empty means create new. + "model_provider": "str", # Optional. Model provider. + "hugging_face" + "model_slug": "str", # Optional. Model identifier + (e.g. Hugging Face slug). + "workload_config": {} # Optional. Workload-specific + configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be unique + within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated Inference + is hosted. Required. Known values are: "atl1", "nyc2", and "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated Inference. + Required. + } + } } - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "schema": "str", # Optional. The schema definition in the specified format. - "schema_id": 0, # Optional. The id for schema. - "schema_type": "str", # Optional. The type of the schema. Known values are: - "AVRO", "JSON", and "PROTOBUF". - "subject_name": "str" # Optional. The name of the schema subject. 
+ "dedicated_inference": { + "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN + of the Dedicated Inference instance. + "public_endpoint_fqdn": "str" # Optional. Public FQDN of the + Dedicated Inference instance. + }, + "id": "str", # Optional. Unique ID of the Dedicated Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether to + expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated Inference. + Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the Dedicated + Inference is hosted. 
+ "spec": { + "enable_public_endpoint": bool, # Whether to expose a public + LLM endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be + unique within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated + Inference is hosted. Required. Known values are: "atl1", "nyc2", and + "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". + "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference. 
+ } } # response body for status code(s): 404 response == { @@ -133311,8 +141934,8 @@ def create_kafka_schema( else: _json = body - _request = build_databases_create_kafka_schema_request( - database_cluster_uuid=database_cluster_uuid, + _request = build_dedicated_inferences_patch_request( + dedicated_inference_id=dedicated_inference_id, content_type=content_type, json=_json, content=_content, @@ -133330,14 +141953,14 @@ def create_kafka_schema( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [202, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 201: + if response.status_code == 202: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -133375,35 +141998,24 @@ def create_kafka_schema( return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_kafka_schema( - self, database_cluster_uuid: str, subject_name: str, **kwargs: Any - ) -> JSON: + def delete(self, dedicated_inference_id: str, **kwargs: Any) -> Optional[JSON]: # pylint: disable=line-too-long - """Get a Kafka Schema by Subject Name. + """Delete a Dedicated Inference. - To get a specific schema by subject name for a Kafka cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME``. + Delete an existing Dedicated Inference. Send a DELETE request to + ``/v2/dedicated-inferences/{dedicated_inference_id}``. The response 202 Accepted + indicates the request was accepted for processing. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param subject_name: The name of the Kafka schema subject. Required. 
- :type subject_name: str - :return: JSON object - :rtype: JSON + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "schema": "str", # Optional. The schema definition in the specified format. - "schema_id": 0, # Optional. The id for schema. - "schema_type": "str", # Optional. The type of the schema. Known values are: - "AVRO", "JSON", and "PROTOBUF". - "subject_name": "str", # Optional. The name of the schema subject. - "version": "str" # Optional. The version of the schema. - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -133432,11 +142044,236 @@ def get_kafka_schema( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_dedicated_inferences_delete_request( + dedicated_inference_id=dedicated_inference_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 202: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + 
response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list( + self, + *, + per_page: int = 20, + page: int = 1, + region: Optional[str] = None, + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """List Dedicated Inferences. + + List all Dedicated Inference instances for your team. Send a GET request to + ``/v2/dedicated-inferences``. You may filter by region and use page and per_page + for pagination. + + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :keyword region: Filter by region. Dedicated Inference is only available in nyc2, tor1, and + atl1. Known values are: "nyc2", "tor1", and "atl1". Default value is None. + :paramtype region: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "dedicated_inferences": [ + { + "created_at": "2020-02-20 00:00:00", # Optional. When the + Dedicated Inference was created. 
+ "endpoints": { + "private_endpoint_fqdn": "str", # Optional. Private + VPC FQDN of the Dedicated Inference instance. + "public_endpoint_fqdn": "str" # Optional. Public + FQDN of the Dedicated Inference instance. + }, + "id": "str", # Optional. Unique ID of the Dedicated + Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. + Pending deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether + to expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": + "str", # DigitalOcean GPU slug. Required. + "scale": 0, # Number + of accelerator instances. Required. + "type": "str", # + Accelerator type (e.g. prefill_decode). Required. + "status": "str" # + Optional. Current state of the Accelerator. Known + values are: "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used + to identify an existing deployment when updating; empty means + create new. + "model_provider": "str", # Optional. + Model provider. "hugging_face" + "model_slug": "str", # Optional. + Model identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated + Inference. Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. + Pending deployment when status is provisioning or updating. + "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the + Dedicated Inference is hosted. + "spec": { + "enable_public_endpoint": bool, # Whether to expose + a public LLM endpoint. Required. 
+ "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": + "str", # DigitalOcean GPU slug. Required. + "scale": 0, # Number + of accelerator instances. Required. + "type": "str", # + Accelerator type (e.g. prefill_decode). Required. + "status": "str" # + Optional. Current state of the Accelerator. Known + values are: "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used + to identify an existing deployment when updating; empty means + create new. + "model_provider": "str", # Optional. + Model provider. "hugging_face" + "model_slug": "str", # Optional. + Model identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. + Must be unique within the team. Required. + "region": "str", # DigitalOcean region where the + Dedicated Inference is hosted. Required. Known values are: "atl1", + "nyc2", and "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". + "updated_at": "2020-02-20 00:00:00", # Optional. When the + Dedicated Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated + Inference. + } + ], + "links": { + "pages": { + "str": "str" # Optional. Pagination links (first, prev, + next, last). + } + }, + "meta": { + "total": 0 # Total number of results. Required. 
+ } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_kafka_schema_request( - database_cluster_uuid=database_cluster_uuid, - subject_name=subject_name, + _request = build_dedicated_inferences_list_request( + per_page=per_page, + page=page, + region=region, headers=_headers, params=_params, ) @@ -133451,81 +142288,543 @@ def get_kafka_schema( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [200]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = 
self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore return cast(JSON, deserialized) # type: ignore + @overload + def create( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a Dedicated Inference. + + Create a new Dedicated Inference for your team. Send a POST request to + ``/v2/dedicated-inferences`` with a ``spec`` object (version, name, region, vpc, + enable_public_endpoint, model_deployments) and optional ``access_tokens`` (e.g. + hugging_face_token for gated models). The response code 202 Accepted indicates + the request was accepted for processing; it does not indicate success or failure. + The token value is returned only on create; store it securely. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public LLM + endpoint. Required. 
+ "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of accelerator + instances. Required. + "type": "str", # Accelerator type + (e.g. prefill_decode). Required. + "status": "str" # Optional. Current + state of the Accelerator. Known values are: "new", + "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to identify an + existing deployment when updating; empty means create new. + "model_provider": "str", # Optional. Model provider. + "hugging_face" + "model_slug": "str", # Optional. Model identifier + (e.g. Hugging Face slug). + "workload_config": {} # Optional. Workload-specific + configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be unique + within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated Inference + is hosted. Required. Known values are: "atl1", "nyc2", and "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated Inference. + Required. + } + }, + "access_tokens": { + "str": "str" # Optional. Key-value pairs for provider tokens (e.g. + Hugging Face). + } + } + + # response body for status code(s): 202 + response == { + "dedicated_inference": { + "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN + of the Dedicated Inference instance. + "public_endpoint_fqdn": "str" # Optional. Public FQDN of the + Dedicated Inference instance. + }, + "id": "str", # Optional. Unique ID of the Dedicated Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether to + expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. 
+ "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated Inference. + Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the Dedicated + Inference is hosted. + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public + LLM endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. 
Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be + unique within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated + Inference is hosted. Required. Known values are: "atl1", "nyc2", and + "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". + "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference. + }, + "token": { + "created_at": "2020-02-20 00:00:00", # Optional. Access token for + authenticating to Dedicated Inference endpoints. + "id": "str", # Optional. Unique ID of the token. + "name": "str", # Optional. Name of the token. + "value": "str" # Optional. Token value; only returned once on + create. Store securely. + } + } + """ + + @overload + def create( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a Dedicated Inference. + + Create a new Dedicated Inference for your team. Send a POST request to + ``/v2/dedicated-inferences`` with a ``spec`` object (version, name, region, vpc, + enable_public_endpoint, model_deployments) and optional ``access_tokens`` (e.g. + hugging_face_token for gated models). The response code 202 Accepted indicates + the request was accepted for processing; it does not indicate success or failure. + The token value is returned only on create; store it securely. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. 
+ Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 202 + response == { + "dedicated_inference": { + "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN + of the Dedicated Inference instance. + "public_endpoint_fqdn": "str" # Optional. Public FQDN of the + Dedicated Inference instance. + }, + "id": "str", # Optional. Unique ID of the Dedicated Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether to + expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated Inference. + Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. 
+ "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the Dedicated + Inference is hosted. + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public + LLM endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be + unique within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated + Inference is hosted. Required. Known values are: "atl1", "nyc2", and + "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". + "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference. + }, + "token": { + "created_at": "2020-02-20 00:00:00", # Optional. Access token for + authenticating to Dedicated Inference endpoints. + "id": "str", # Optional. Unique ID of the token. + "name": "str", # Optional. 
Name of the token. + "value": "str" # Optional. Token value; only returned once on + create. Store securely. + } + } + """ + @distributed_trace - def delete_kafka_schema( - self, database_cluster_uuid: str, subject_name: str, **kwargs: Any - ) -> Optional[JSON]: + def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Delete a Kafka Schema by Subject Name. + """Create a Dedicated Inference. - To delete a specific schema by subject name for a Kafka cluster, send a DELETE request to - ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME``. + Create a new Dedicated Inference for your team. Send a POST request to + ``/v2/dedicated-inferences`` with a ``spec`` object (version, name, region, vpc, + enable_public_endpoint, model_deployments) and optional ``access_tokens`` (e.g. + hugging_face_token for gated models). The response code 202 Accepted indicates + the request was accepted for processing; it does not indicate success or failure. + The token value is returned only on create; store it securely. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param subject_name: The name of the Kafka schema subject. Required. - :type subject_name: str - :return: JSON object or None - :rtype: JSON or None + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 404 + # JSON input template you can fill out and use as your body input. + body = { + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public LLM + endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of accelerator + instances. Required. 
+ "type": "str", # Accelerator type + (e.g. prefill_decode). Required. + "status": "str" # Optional. Current + state of the Accelerator. Known values are: "new", + "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to identify an + existing deployment when updating; empty means create new. + "model_provider": "str", # Optional. Model provider. + "hugging_face" + "model_slug": "str", # Optional. Model identifier + (e.g. Hugging Face slug). + "workload_config": {} # Optional. Workload-specific + configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be unique + within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated Inference + is hosted. Required. Known values are: "atl1", "nyc2", and "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated Inference. + Required. + } + }, + "access_tokens": { + "str": "str" # Optional. Key-value pairs for provider tokens (e.g. + Hugging Face). + } + } + + # response body for status code(s): 202 response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. + "dedicated_inference": { + "created_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was created. + "endpoints": { + "private_endpoint_fqdn": "str", # Optional. Private VPC FQDN + of the Dedicated Inference instance. + "public_endpoint_fqdn": "str" # Optional. Public FQDN of the + Dedicated Inference instance. + }, + "id": "str", # Optional. 
Unique ID of the Dedicated Inference. + "pending_deployment_spec": { + "created_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "enable_public_endpoint": bool, # Optional. Whether to + expose a public LLM endpoint. + "id": "str", # Optional. Deployment UUID. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. + Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Optional. Name of the Dedicated Inference. + Must be unique within the team. + "status": "str", # Optional. Known values are: + "provisioning" and "updating". + "updated_at": "2020-02-20 00:00:00", # Optional. Pending + deployment when status is provisioning or updating. + "version": 0, # Optional. Spec version. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "region": "str", # Optional. DigitalOcean region where the Dedicated + Inference is hosted. + "spec": { + "enable_public_endpoint": bool, # Whether to expose a public + LLM endpoint. Required. + "model_deployments": [ + { + "accelerators": [ + { + "accelerator_slug": "str", # + DigitalOcean GPU slug. Required. + "scale": 0, # Number of + accelerator instances. Required. + "type": "str", # Accelerator + type (e.g. prefill_decode). Required. + "status": "str" # Optional. 
+ Current state of the Accelerator. Known values are: + "new", "provisioning", and "active". + } + ], + "model_id": "str", # Optional. Used to + identify an existing deployment when updating; empty means create + new. + "model_provider": "str", # Optional. Model + provider. "hugging_face" + "model_slug": "str", # Optional. Model + identifier (e.g. Hugging Face slug). + "workload_config": {} # Optional. + Workload-specific configuration (e.g. ISL/OSL in future). + } + ], + "name": "str", # Name of the Dedicated Inference. Must be + unique within the team. Required. + "region": "str", # DigitalOcean region where the Dedicated + Inference is hosted. Required. Known values are: "atl1", "nyc2", and + "tor1". + "version": 0, # Spec version. Required. + "vpc": { + "uuid": "str" # VPC UUID for the Dedicated + Inference. Required. + } + }, + "status": "str", # Optional. Current state of the Dedicated + Inference. Known values are: "active", "new", "provisioning", "updating", + "deleting", and "error". + "updated_at": "2020-02-20 00:00:00", # Optional. When the Dedicated + Inference was last updated. + "vpc_uuid": "str" # Optional. VPC UUID of the Dedicated Inference. + }, + "token": { + "created_at": "2020-02-20 00:00:00", # Optional. Access token for + authenticating to Dedicated Inference endpoints. + "id": "str", # Optional. Unique ID of the token. + "name": "str", # Optional. Name of the token. + "value": "str" # Optional. Token value; only returned once on + create. Store securely. 
+ } } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -133541,14 +142840,26 @@ def delete_kafka_schema( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_delete_kafka_schema_request( - database_cluster_uuid=database_cluster_uuid, - subject_name=subject_name, + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_dedicated_inferences_create_request( + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -133563,62 +142874,59 @@ def delete_kafka_schema( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [202]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - 
response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_kafka_schema_version( - self, database_cluster_uuid: str, subject_name: str, version: str, **kwargs: Any + def list_accelerators( + self, + dedicated_inference_id: str, + *, + per_page: int = 20, + page: int = 1, + slug: Optional[str] = None, + **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Get Kafka Schema by Subject Version. + """List Dedicated Inference Accelerators. - To get a specific schema by subject name for a Kafka cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/schema-registry/$SUBJECT_NAME/versions/$VERSION``. + List all accelerators (GPUs) in use by a Dedicated Inference instance. Send a + GET request to ``/v2/dedicated-inferences/{dedicated_inference_id}/accelerators``. + Optionally filter by slug and use page/per_page for pagination. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
- :type database_cluster_uuid: str - :param subject_name: The name of the Kafka schema subject. Required. - :type subject_name: str - :param version: The version of the Kafka schema subject. Required. - :type version: str + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :keyword slug: Filter accelerators by GPU slug. Default value is None. + :paramtype slug: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -133628,12 +142936,23 @@ def get_kafka_schema_version( # response body for status code(s): 200 response == { - "schema": "str", # Optional. The schema definition in the specified format. - "schema_id": 0, # Optional. The id for schema. - "schema_type": "str", # Optional. The type of the schema. Known values are: - "AVRO", "JSON", and "PROTOBUF". - "subject_name": "str", # Optional. The name of the schema subject. - "version": "str" # Optional. The version of the schema. + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "accelerators": [ + { + "created_at": "2020-02-20 00:00:00", # Optional. + "id": "str", # Optional. Unique ID of the accelerator. + "name": "str", # Optional. Name of the accelerator. + "role": "str", # Optional. Role of the accelerator (e.g. + prefill_decode). + "slug": "str", # Optional. DigitalOcean GPU slug. + "status": "str" # Optional. Status of the accelerator. 
+ } + ], + "links": { + "pages": {} + } } # response body for status code(s): 404 response == { @@ -133665,10 +142984,11 @@ def get_kafka_schema_version( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_kafka_schema_version_request( - database_cluster_uuid=database_cluster_uuid, - subject_name=subject_name, - version=version, + _request = build_dedicated_inferences_list_accelerators_request( + dedicated_inference_id=dedicated_inference_id, + per_page=per_page, + page=page, + slug=slug, headers=_headers, params=_params, ) @@ -133728,19 +143048,21 @@ def get_kafka_schema_version( return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_kafka_schema_config( - self, database_cluster_uuid: str, **kwargs: Any + def get_accelerator( + self, dedicated_inference_id: str, accelerator_id: str, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Retrieve Schema Registry Configuration for a kafka Cluster. + """Get a Dedicated Inference Accelerator. - To retrieve the Schema Registry configuration for a Kafka cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/schema-registry/config``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. + Retrieve a single accelerator by ID for a Dedicated Inference instance. Send a + GET request to + ``/v2/dedicated-inferences/{dedicated_inference_id}/accelerators/{accelerator_id}``. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str + :param accelerator_id: A unique identifier for a Dedicated Inference accelerator. Required. 
+ :type accelerator_id: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -133750,9 +143072,12 @@ def get_kafka_schema_config( # response body for status code(s): 200 response == { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "created_at": "2020-02-20 00:00:00", # Optional. + "id": "str", # Optional. Unique ID of the accelerator. + "name": "str", # Optional. Name of the accelerator. + "role": "str", # Optional. Role of the accelerator (e.g. prefill_decode). + "slug": "str", # Optional. DigitalOcean GPU slug. + "status": "str" # Optional. Status of the accelerator. } # response body for status code(s): 404 response == { @@ -133784,8 +143109,9 @@ def get_kafka_schema_config( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_kafka_schema_config_request( - database_cluster_uuid=database_cluster_uuid, + _request = build_dedicated_inferences_get_accelerator_request( + dedicated_inference_id=dedicated_inference_id, + accelerator_id=accelerator_id, headers=_headers, params=_params, ) @@ -133844,132 +143170,18 @@ def get_kafka_schema_config( return cast(JSON, deserialized) # type: ignore - @overload - def update_kafka_schema_config( - self, - database_cluster_uuid: str, - body: Optional[JSON] = None, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Update Schema Registry Configuration for a kafka Cluster. - - To update the Schema Registry configuration for a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/schema-registry/config``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. 
- - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - } - - # response body for status code(s): 200 - response == { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - def update_kafka_schema_config( - self, - database_cluster_uuid: str, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: - # pylint: disable=line-too-long - """Update Schema Registry Configuration for a kafka Cluster. 
- - To update the Schema Registry configuration for a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/schema-registry/config``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - @distributed_trace - def update_kafka_schema_config( - self, - database_cluster_uuid: str, - body: Optional[Union[JSON, IO[bytes]]] = None, - **kwargs: Any, - ) -> JSON: + def get_ca(self, dedicated_inference_id: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Update Schema Registry Configuration for a kafka Cluster. 
+ """Get Dedicated Inference CA Certificate. - To update the Schema Registry configuration for a Kafka cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/schema-registry/config``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. + Get the CA certificate for a Dedicated Inference instance (base64-encoded). + Required for private endpoint connectivity. Send a GET request to + ``/v2/dedicated-inferences/{dedicated_inference_id}/ca``. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. - :type body: JSON or IO[bytes] + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -133977,18 +143189,9 @@ def update_kafka_schema_config( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - } - # response body for status code(s): 200 response == { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "cert": "str" # Base64-encoded CA certificate. Required. 
} # response body for status code(s): 404 response == { @@ -134015,30 +143218,13 @@ def update_kafka_schema_config( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[JSON] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None - - _request = build_databases_update_kafka_schema_config_request( - database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, + _request = build_dedicated_inferences_get_ca_request( + dedicated_inference_id=dedicated_inference_id, headers=_headers, params=_params, ) @@ -134098,22 +143284,28 @@ def update_kafka_schema_config( return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_kafka_schema_subject_config( - self, database_cluster_uuid: str, subject_name: str, **kwargs: Any + def list_tokens( + self, + dedicated_inference_id: str, + *, + per_page: int = 20, + page: int = 1, + **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Retrieve Schema Registry Configuration for a Subject of kafka Cluster. + """List Dedicated Inference Tokens. - To retrieve the Schema Registry configuration for a Subject of a Kafka cluster, send a GET - request to - ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. + List all access tokens for a Dedicated Inference instance. Token values are + not returned; only id, name, and created_at. 
Send a GET request to + ``/v2/dedicated-inferences/{dedicated_inference_id}/tokens``. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param subject_name: The name of the Kafka schema subject. Required. - :type subject_name: str + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -134123,10 +143315,21 @@ def get_kafka_schema_subject_config( # response body for status code(s): 200 response == { - "compatibility_level": "str", # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - "subject_name": "str" # The name of the schema subject. Required. + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "links": { + "pages": {} + }, + "tokens": [ + { + "created_at": "2020-02-20 00:00:00", # Optional. + "id": "str", # Optional. Unique ID of the token. + "name": "str", # Optional. Name of the token. + "value": "str" # Optional. Token value; only returned once + on create. Store securely. 
+ } + ] } # response body for status code(s): 404 response == { @@ -134158,9 +143361,10 @@ def get_kafka_schema_subject_config( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_get_kafka_schema_subject_config_request( - database_cluster_uuid=database_cluster_uuid, - subject_name=subject_name, + _request = build_dedicated_inferences_list_tokens_request( + dedicated_inference_id=dedicated_inference_id, + per_page=per_page, + page=page, headers=_headers, params=_params, ) @@ -134220,29 +143424,25 @@ def get_kafka_schema_subject_config( return cast(JSON, deserialized) # type: ignore @overload - def update_kafka_schema_subject_config( + def create_tokens( self, - database_cluster_uuid: str, - subject_name: str, - body: Optional[JSON] = None, + dedicated_inference_id: str, + body: JSON, *, content_type: str = "application/json", **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Update Schema Registry Configuration for a Subject of kafka Cluster. + """Create a Dedicated Inference Token. - To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT - request to - ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. + Create a new access token for a Dedicated Inference instance. Send a POST + request to ``/v2/dedicated-inferences/{dedicated_inference_id}/tokens`` with a + ``name``. The token value is returned only once in the response; store it securely. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param subject_name: The name of the Kafka schema subject. Required. - :type subject_name: str - :param body: Default value is None. + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. 
+ :type dedicated_inference_id: str + :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -134256,17 +143456,19 @@ def update_kafka_schema_subject_config( # JSON input template you can fill out and use as your body input. body = { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "name": "str" # Name for the new token. Required. } - # response body for status code(s): 200 + # response body for status code(s): 202 response == { - "compatibility_level": "str", # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - "subject_name": "str" # The name of the schema subject. Required. + "token": { + "created_at": "2020-02-20 00:00:00", # Optional. Access token for + authenticating to Dedicated Inference endpoints. + "id": "str", # Optional. Unique ID of the token. + "name": "str", # Optional. Name of the token. + "value": "str" # Optional. Token value; only returned once on + create. Store securely. + } } # response body for status code(s): 404 response == { @@ -134282,29 +143484,25 @@ def update_kafka_schema_subject_config( """ @overload - def update_kafka_schema_subject_config( + def create_tokens( self, - database_cluster_uuid: str, - subject_name: str, - body: Optional[IO[bytes]] = None, + dedicated_inference_id: str, + body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Update Schema Registry Configuration for a Subject of kafka Cluster. + """Create a Dedicated Inference Token. 
- To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT - request to - ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. + Create a new access token for a Dedicated Inference instance. Send a POST + request to ``/v2/dedicated-inferences/{dedicated_inference_id}/tokens`` with a + ``name``. The token value is returned only once in the response; store it securely. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param subject_name: The name of the Kafka schema subject. Required. - :type subject_name: str - :param body: Default value is None. + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str + :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". @@ -134316,12 +143514,16 @@ def update_kafka_schema_subject_config( Example: .. code-block:: python - # response body for status code(s): 200 + # response body for status code(s): 202 response == { - "compatibility_level": "str", # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - "subject_name": "str" # The name of the schema subject. Required. + "token": { + "created_at": "2020-02-20 00:00:00", # Optional. Access token for + authenticating to Dedicated Inference endpoints. + "id": "str", # Optional. Unique ID of the token. + "name": "str", # Optional. Name of the token. + "value": "str" # Optional. Token value; only returned once on + create. Store securely. 
+ } } # response body for status code(s): 404 response == { @@ -134337,27 +143539,20 @@ def update_kafka_schema_subject_config( """ @distributed_trace - def update_kafka_schema_subject_config( - self, - database_cluster_uuid: str, - subject_name: str, - body: Optional[Union[JSON, IO[bytes]]] = None, - **kwargs: Any, + def create_tokens( + self, dedicated_inference_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update Schema Registry Configuration for a Subject of kafka Cluster. + """Create a Dedicated Inference Token. - To update the Schema Registry configuration for a Subject of a Kafka cluster, send a PUT - request to - ``/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME``. - The response is a JSON object with a ``compatibility_level`` key, which is set to an object - containing any database configuration parameters. + Create a new access token for a Dedicated Inference instance. Send a POST + request to ``/v2/dedicated-inferences/{dedicated_inference_id}/tokens`` with a + ``name``. The token value is returned only once in the response; store it securely. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param subject_name: The name of the Kafka schema subject. Required. - :type subject_name: str - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON @@ -134368,17 +143563,19 @@ def update_kafka_schema_subject_config( # JSON input template you can fill out and use as your body input. body = { - "compatibility_level": "str" # The compatibility level of the schema - registry. Required. 
Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". + "name": "str" # Name for the new token. Required. } - # response body for status code(s): 200 + # response body for status code(s): 202 response == { - "compatibility_level": "str", # The compatibility level of the schema - registry. Required. Known values are: "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", - "FORWARD", "FORWARD_TRANSITIVE", "FULL", and "FULL_TRANSITIVE". - "subject_name": "str" # The name of the schema subject. Required. + "token": { + "created_at": "2020-02-20 00:00:00", # Optional. Access token for + authenticating to Dedicated Inference endpoints. + "id": "str", # Optional. Unique ID of the token. + "name": "str", # Optional. Name of the token. + "value": "str" # Optional. Token value; only returned once on + create. Store securely. + } } # response body for status code(s): 404 response == { @@ -134419,14 +143616,10 @@ def update_kafka_schema_subject_config( if isinstance(body, (IOBase, bytes)): _content = body else: - if body is not None: - _json = body - else: - _json = None + _json = body - _request = build_databases_update_kafka_schema_subject_config_request( - database_cluster_uuid=database_cluster_uuid, - subject_name=subject_name, + _request = build_dedicated_inferences_create_tokens_request( + dedicated_inference_id=dedicated_inference_id, content_type=content_type, json=_json, content=_content, @@ -134444,14 +143637,14 @@ def update_kafka_schema_subject_config( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [202, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: + if response.status_code == 202: 
response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -134489,32 +143682,27 @@ def update_kafka_schema_subject_config( return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON: + def delete_tokens( + self, dedicated_inference_id: str, token_id: str, **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Retrieve Database Clusters' Metrics Endpoint Credentials. + """Revoke a Dedicated Inference Token. - To show the credentials for all database clusters' metrics endpoints, send a GET request to - ``/v2/databases/metrics/credentials``. The result will be a JSON object with a ``credentials`` - key. + Revoke (delete) an access token for a Dedicated Inference instance. Send a + DELETE request to ``/v2/dedicated-inferences/{dedicated_inference_id}/tokens/{token_id}``. - :return: JSON object - :rtype: JSON + :param dedicated_inference_id: A unique identifier for a Dedicated Inference instance. + Required. + :type dedicated_inference_id: str + :param token_id: A unique identifier for a Dedicated Inference access token. Required. + :type token_id: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "credentials": { - "credentials": { - "basic_auth_password": "str", # Optional. basic - authentication password for metrics HTTP endpoint. - "basic_auth_username": "str" # Optional. basic - authentication username for metrics HTTP endpoint. 
- } - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -134543,9 +143731,11 @@ def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_databases_get_cluster_metrics_credentials_request( + _request = build_dedicated_inferences_delete_tokens_request( + dedicated_inference_id=dedicated_inference_id, + token_id=token_id, headers=_headers, params=_params, ) @@ -134560,14 +143750,15 @@ def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -134578,11 +143769,6 @@ def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON: "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -134600,98 +143786,37 @@ def get_cluster_metrics_credentials(self, **kwargs: Any) -> JSON: deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @overload - def 
update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements - self, - body: Optional[JSON] = None, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> None: - """Update Database Clusters' Metrics Endpoint Credentials. - - To update the credentials for all database clusters' metrics endpoints, send a PUT request to - ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content - status code with no body in response. - - :param body: Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "credentials": { - "basic_auth_password": "str", # Optional. basic authentication - password for metrics HTTP endpoint. - "basic_auth_username": "str" # Optional. basic authentication - username for metrics HTTP endpoint. - } - } - """ - - @overload - def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements - self, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> None: - """Update Database Clusters' Metrics Endpoint Credentials. - - To update the credentials for all database clusters' metrics endpoints, send a PUT request to - ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content - status code with no body in response. + return cls(pipeline_response, deserialized, response_headers) # type: ignore - :param body: Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". 
- :paramtype content_type: str - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ + return deserialized # type: ignore @distributed_trace - def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-statements - self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any - ) -> None: - """Update Database Clusters' Metrics Endpoint Credentials. + def list_sizes(self, **kwargs: Any) -> JSON: + """List Dedicated Inference Sizes. - To update the credentials for all database clusters' metrics endpoints, send a PUT request to - ``/v2/databases/metrics/credentials``. A successful request will receive a 204 No Content - status code with no body in response. + Get available Dedicated Inference sizes and pricing for supported GPUs. Send a + GET request to ``/v2/dedicated-inferences/sizes``. - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. - :type body: JSON or IO[bytes] - :return: None - :rtype: None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "credentials": { - "basic_auth_password": "str", # Optional. basic authentication - password for metrics HTTP endpoint. - "basic_auth_username": "str" # Optional. basic authentication - username for metrics HTTP endpoint. - } + # response body for status code(s): 200 + response == { + "enabled_regions": [ + "str" # Optional. Regions where Dedicated Inference is available. + ], + "sizes": [ + { + "currency": "str", # Optional. + "gpu_slug": "str", # Optional. + "price_per_hour": "str", # Optional. + "region": "str" # Optional. 
+ } + ] } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -134707,29 +143832,12 @@ def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-s } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) - cls: ClsType[None] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_update_cluster_metrics_credentials_request( - content_type=content_type, - json=_json, - content=_content, + _request = build_dedicated_inferences_list_sizes_request( headers=_headers, params=_params, ) @@ -134744,7 +143852,7 @@ def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-s response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [200]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -134761,23 +143869,24 @@ def update_cluster_metrics_credentials( # pylint: disable=inconsistent-return-s "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if cls: - return cls(pipeline_response, None, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - @distributed_trace - def list_opeasearch_indexes( - self, database_cluster_uuid: str, **kwargs: Any - ) -> JSON: - # pylint: disable=line-too-long - 
"""List Indexes for a OpenSearch Cluster. + return cast(JSON, deserialized) # type: ignore - To list all of a OpenSearch cluster's indexes, send a GET request to - ``/v2/databases/$DATABASE_ID/indexes``. + @distributed_trace + def get_gpu_model_config(self, **kwargs: Any) -> JSON: + """Get Dedicated Inference GPU Model Config. - The result will be a JSON object with a ``indexes`` key. + Get supported GPU and model configurations for Dedicated Inference. Use this to + discover supported GPU slugs and model slugs (e.g. Hugging Face). Send a GET + request to ``/v2/dedicated-inferences/gpu-model-config``. - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -134787,35 +143896,18 @@ def list_opeasearch_indexes( # response body for status code(s): 200 response == { - "indexes": [ + "gpu_model_configs": [ { - "created_time": "2020-02-20 00:00:00", # Optional. The date - and time the index was created. - "health": "str", # Optional. The health of the OpenSearch - index. Known values are: "unknown", "green", "yellow", "red", and "red*". - "index_name": "str", # Optional. The name of the opensearch - index. - "number_of_replicas": 0, # Optional. The number of replicas - for the index. - "number_of_shards": 0, # Optional. The number of shards for - the index. - "size": 0, # Optional. The size of the index. - "status": "str" # Optional. The status of the OpenSearch - index. Known values are: "unknown", "open", "close", and "none". + "gpu_slugs": [ + "str" # Optional. + ], + "is_gated_model": bool, # Optional. Whether the model + requires gated access (e.g. Hugging Face token). + "model_name": "str", # Optional. + "model_slug": "str" # Optional. } ] } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. 
For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { 404: ResourceNotFoundError, @@ -134835,8 +143927,7 @@ def list_opeasearch_indexes( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_databases_list_opeasearch_indexes_request( - database_cluster_uuid=database_cluster_uuid, + _request = build_dedicated_inferences_get_gpu_model_config_request( headers=_headers, params=_params, ) @@ -134851,160 +143942,32 @@ def list_opeasearch_indexes( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [200]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") 
- ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore - - @distributed_trace - def delete_opensearch_index( - self, database_cluster_uuid: str, index_name: str, **kwargs: Any - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Delete Index for OpenSearch Cluster. - - To delete a single index within OpenSearch cluster, send a DELETE request - to ``/v2/databases/$DATABASE_ID/indexes/$INDEX_NAME``. - - A status of 204 will be given. This indicates that the request was - processed successfully, but that no response body is needed. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param index_name: The name of the OpenSearch index. Required. - :type index_name: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - - _request = build_databases_delete_opensearch_index_request( - database_cluster_uuid=database_cluster_uuid, - index_name=index_name, - headers=_headers, - params=_params, + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") ) - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - deserialized = None - response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", 
response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore class DomainsOperations: @@ -149507,6 +158470,897 @@ def delete_trigger( return deserialized # type: ignore +class FunctionsAccessKeyOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~pydo.GeneratedClient`'s + :attr:`functions_access_key` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) + + @distributed_trace + def list(self, namespace_id: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """List Namespace Access Keys. + + Lists all access keys for a serverless functions namespace. + + To list access keys, send a GET request to ``/v2/functions/namespaces/{namespace_id}/keys``. + + :param namespace_id: The ID of the namespace to be managed. Required. 
+ :type namespace_id: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "access_keys": [ + { + "created_at": "2020-02-20 00:00:00", # Optional. The date + and time the key was created. + "expires_at": "2020-02-20 00:00:00", # Optional. When the + key expires (null for non-expiring keys). + "id": "str", # Optional. The access key's unique identifier + with prefix 'dof"" *v1*"" '. + "name": "str", # Optional. The access key's name. + "updated_at": "2020-02-20 00:00:00" # Optional. The date and + time the key was last updated. + } + ], + "count": 0 # Optional. Total number of access keys. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_functions_access_key_list_request( + namespace_id=namespace_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = 
self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def create( + self, + namespace_id: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Create a Namespace Access Key. + + Creates a new access key for a serverless functions namespace. + The access key can be used to authenticate requests to the namespace's functions. + The secret key is only returned once upon creation. + + To create an access key, send a POST request to + ``/v2/functions/namespaces/{namespace_id}/keys``. + + :param namespace_id: The ID of the namespace to be managed. Required. + :type namespace_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # The access key's name. Required. + "expires_in": "str" # Optional. The duration after which the access key + expires, specified as a human-readable duration string in the format ``h`` + (hours) or ``d`` (days). Minimum value is ``1h``. If omitted, the key will + never expire. + } + + # response body for status code(s): 201 + response == { + "access_key": { + "created_at": "2020-02-20 00:00:00", # Optional. The date and time + the key was created. + "expires_at": "2020-02-20 00:00:00", # Optional. When the key + expires (null for non-expiring keys). + "id": "str", # Optional. 
The access key's unique identifier with + prefix 'dof"" *v1*"" '. + "name": "str", # Optional. The access key's name. + "secret": "str", # Optional. The secret key used to authenticate. + This is only returned once upon creation. Make sure to copy and securely + store it. + "updated_at": "2020-02-20 00:00:00" # Optional. The date and time + the key was last updated. + } + } + # response body for status code(s): 400, 404, 409 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def create( + self, + namespace_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Create a Namespace Access Key. + + Creates a new access key for a serverless functions namespace. + The access key can be used to authenticate requests to the namespace's functions. + The secret key is only returned once upon creation. + + To create an access key, send a POST request to + ``/v2/functions/namespaces/{namespace_id}/keys``. + + :param namespace_id: The ID of the namespace to be managed. Required. + :type namespace_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 201 + response == { + "access_key": { + "created_at": "2020-02-20 00:00:00", # Optional. The date and time + the key was created. + "expires_at": "2020-02-20 00:00:00", # Optional. When the key + expires (null for non-expiring keys). + "id": "str", # Optional. The access key's unique identifier with + prefix 'dof"" *v1*"" '. + "name": "str", # Optional. The access key's name. + "secret": "str", # Optional. The secret key used to authenticate. + This is only returned once upon creation. Make sure to copy and securely + store it. + "updated_at": "2020-02-20 00:00:00" # Optional. The date and time + the key was last updated. + } + } + # response body for status code(s): 400, 404, 409 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def create( + self, namespace_id: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a Namespace Access Key. + + Creates a new access key for a serverless functions namespace. + The access key can be used to authenticate requests to the namespace's functions. + The secret key is only returned once upon creation. + + To create an access key, send a POST request to + ``/v2/functions/namespaces/{namespace_id}/keys``. + + :param namespace_id: The ID of the namespace to be managed. Required. + :type namespace_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. 
+ :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # The access key's name. Required. + "expires_in": "str" # Optional. The duration after which the access key + expires, specified as a human-readable duration string in the format ``h`` + (hours) or ``d`` (days). Minimum value is ``1h``. If omitted, the key will + never expire. + } + + # response body for status code(s): 201 + response == { + "access_key": { + "created_at": "2020-02-20 00:00:00", # Optional. The date and time + the key was created. + "expires_at": "2020-02-20 00:00:00", # Optional. When the key + expires (null for non-expiring keys). + "id": "str", # Optional. The access key's unique identifier with + prefix 'dof"" *v1*"" '. + "name": "str", # Optional. The access key's name. + "secret": "str", # Optional. The secret key used to authenticate. + This is only returned once upon creation. Make sure to copy and securely + store it. + "updated_at": "2020-02-20 00:00:00" # Optional. The date and time + the key was last updated. + } + } + # response body for status code(s): 400, 404, 409 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_functions_access_key_create_request( + namespace_id=namespace_id, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [201, 400, 404, 409]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 201: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + 
deserialized = response.json() + else: + deserialized = None + + if response.status_code == 400: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 409: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def update( + self, + namespace_id: str, + key_id: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Update a Namespace Access Key. + + Updates the name of an access key for a serverless functions namespace. 
+ + To update an access key, send a PUT request to + ``/v2/functions/namespaces/{namespace_id}/keys/{key_id}``. + + :param namespace_id: The ID of the namespace to be managed. Required. + :type namespace_id: str + :param key_id: The ID of the access key to be managed. Required. + :type key_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str" # The new name for the access key. Required. + } + + # response body for status code(s): 200 + response == { + "access_key": { + "created_at": "2020-02-20 00:00:00", # Optional. The date and time + the key was created. + "expires_at": "2020-02-20 00:00:00", # Optional. When the key + expires (null for non-expiring keys). + "id": "str", # Optional. The access key's unique identifier with + prefix 'dof"" *v1*"" '. + "name": "str", # Optional. The access key's name. + "updated_at": "2020-02-20 00:00:00" # Optional. The date and time + the key was last updated. + } + } + # response body for status code(s): 400, 404, 409 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @overload + def update( + self, + namespace_id: str, + key_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Update a Namespace Access Key. + + Updates the name of an access key for a serverless functions namespace. + + To update an access key, send a PUT request to + ``/v2/functions/namespaces/{namespace_id}/keys/{key_id}``. + + :param namespace_id: The ID of the namespace to be managed. Required. + :type namespace_id: str + :param key_id: The ID of the access key to be managed. Required. + :type key_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "access_key": { + "created_at": "2020-02-20 00:00:00", # Optional. The date and time + the key was created. + "expires_at": "2020-02-20 00:00:00", # Optional. When the key + expires (null for non-expiring keys). + "id": "str", # Optional. The access key's unique identifier with + prefix 'dof"" *v1*"" '. + "name": "str", # Optional. The access key's name. + "updated_at": "2020-02-20 00:00:00" # Optional. The date and time + the key was last updated. + } + } + # response body for status code(s): 400, 404, 409 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def update( + self, + namespace_id: str, + key_id: str, + body: Union[JSON, IO[bytes]], + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Update a Namespace Access Key. + + Updates the name of an access key for a serverless functions namespace. + + To update an access key, send a PUT request to + ``/v2/functions/namespaces/{namespace_id}/keys/{key_id}``. + + :param namespace_id: The ID of the namespace to be managed. Required. + :type namespace_id: str + :param key_id: The ID of the access key to be managed. Required. + :type key_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str" # The new name for the access key. Required. + } + + # response body for status code(s): 200 + response == { + "access_key": { + "created_at": "2020-02-20 00:00:00", # Optional. The date and time + the key was created. + "expires_at": "2020-02-20 00:00:00", # Optional. When the key + expires (null for non-expiring keys). + "id": "str", # Optional. The access key's unique identifier with + prefix 'dof"" *v1*"" '. + "name": "str", # Optional. The access key's name. + "updated_at": "2020-02-20 00:00:00" # Optional. The date and time + the key was last updated. + } + } + # response body for status code(s): 400, 404, 409 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. 
+ "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_functions_access_key_update_request( + namespace_id=namespace_id, + key_id=key_id, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 400, 404, 409]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + 
response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 400: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 409: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + 
@distributed_trace + def delete(self, namespace_id: str, key_id: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Delete a Namespace Access Key. + + Deletes an access key for a serverless functions namespace. + + To delete an access key, send a DELETE request to + ``/v2/functions/namespaces/{namespace_id}/keys/{key_id}``. + + :param namespace_id: The ID of the namespace to be managed. Required. + :type namespace_id: str + :param key_id: The ID of the access key to be managed. Required. + :type key_id: str + :return: JSON or JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_functions_access_key_delete_request( + namespace_id=namespace_id, + key_id=key_id, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + 
response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + class ImagesOperations: """ .. warning:: @@ -182518,79 +192372,995 @@ def create( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "registry": { + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the registry + was created. + "name": "str", # Optional. A globally unique name for the container + registry. Must be lowercase and be composed only of numbers, letters and + ``-``"" , up to a limit of 63 characters. + "region": "str", # Optional. Slug of the region where registry data + is stored. + "storage_usage_bytes": 0, # Optional. The amount of storage used in + the registry in bytes. + "storage_usage_bytes_updated_at": "2020-02-20 00:00:00", # Optional. + The time at which the storage usage was updated. Storage usage is calculated + asynchronously, and may not immediately reflect pushes to the registry. + "subscription": { + "created_at": "2020-02-20 00:00:00", # Optional. The time at + which the subscription was created. + "tier": { + "allow_storage_overage": bool, # Optional. A boolean + indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB + used. + "included_bandwidth_bytes": 0, # Optional. 
The + amount of outbound data transfer included in the subscription tier in + bytes. + "included_repositories": 0, # Optional. The number + of repositories included in the subscription tier. ``0`` indicates + that the subscription tier includes unlimited repositories. + "included_storage_bytes": 0, # Optional. The amount + of storage included in the subscription tier in bytes. + "monthly_price_in_cents": 0, # Optional. The monthly + cost of the subscription tier in cents. + "name": "str", # Optional. The name of the + subscription tier. + "slug": "str", # Optional. The slug identifier of + the subscription tier. + "storage_overage_price_in_cents": 0 # Optional. The + price paid in cents per GiB for additional storage beyond what is + included in the subscription plan. + }, + "updated_at": "2020-02-20 00:00:00" # Optional. The time at + which the subscription was last updated. + } + } + } + """ + + @distributed_trace + def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Create Container Registry. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + To create your container registry, send a POST request to ``/v2/registry``. + + The ``name`` becomes part of the URL for images stored in the registry. For + example, if your registry is called ``example``\\ , an image in it will have the + URL ``registry.digitalocean.com/example/image:tag``. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # A globally unique name for the container registry. Must be + lowercase and be composed only of numbers, letters and ``-``"" , up to a limit of + 63 characters. Required. 
+ "subscription_tier_slug": "str", # The slug of the subscription tier to sign + up for. Valid values can be retrieved using the options endpoint. Required. Known + values are: "starter", "basic", and "professional". + "region": "str" # Optional. Slug of the region where registry data is + stored. When not provided, a region will be selected. Known values are: "nyc3", + "sfo3", "ams3", "sgp1", and "fra1". + } + + # response body for status code(s): 201 + response == { + "registry": { + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the registry + was created. + "name": "str", # Optional. A globally unique name for the container + registry. Must be lowercase and be composed only of numbers, letters and + ``-``"" , up to a limit of 63 characters. + "region": "str", # Optional. Slug of the region where registry data + is stored. + "storage_usage_bytes": 0, # Optional. The amount of storage used in + the registry in bytes. + "storage_usage_bytes_updated_at": "2020-02-20 00:00:00", # Optional. + The time at which the storage usage was updated. Storage usage is calculated + asynchronously, and may not immediately reflect pushes to the registry. + "subscription": { + "created_at": "2020-02-20 00:00:00", # Optional. The time at + which the subscription was created. + "tier": { + "allow_storage_overage": bool, # Optional. A boolean + indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB + used. + "included_bandwidth_bytes": 0, # Optional. The + amount of outbound data transfer included in the subscription tier in + bytes. + "included_repositories": 0, # Optional. The number + of repositories included in the subscription tier. ``0`` indicates + that the subscription tier includes unlimited repositories. + "included_storage_bytes": 0, # Optional. 
The amount + of storage included in the subscription tier in bytes. + "monthly_price_in_cents": 0, # Optional. The monthly + cost of the subscription tier in cents. + "name": "str", # Optional. The name of the + subscription tier. + "slug": "str", # Optional. The slug identifier of + the subscription tier. + "storage_overage_price_in_cents": 0 # Optional. The + price paid in cents per GiB for additional storage beyond what is + included in the subscription plan. + }, + "updated_at": "2020-02-20 00:00:00" # Optional. The time at + which the subscription was last updated. + } + } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_registry_create_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, 
response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def delete(self, **kwargs: Any) -> Optional[JSON]: + # pylint: disable=line-too-long + """Delete Container Registry. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + To delete your container registry, destroying all container image + data stored in it, send a DELETE request to ``/v2/registry``. + + This operation is not compatible with multiple registries in a DO account. You should use + ``/v2/registries/{registry_name}`` instead. + + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404, 412 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_registry_delete_request( + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404, 412]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if 
response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 412: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_subscription(self, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Get Subscription. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + A subscription is automatically created when you configure your + container registry. To get information about your subscription, send a GET + request to ``/v2/registry/subscription``. + + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "subscription": { + "created_at": "2020-02-20 00:00:00", # Optional. The time at which + the subscription was created. + "tier": { + "allow_storage_overage": bool, # Optional. A boolean + indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB + used. + "included_bandwidth_bytes": 0, # Optional. The amount of + outbound data transfer included in the subscription tier in bytes. + "included_repositories": 0, # Optional. The number of + repositories included in the subscription tier. ``0`` indicates that the + subscription tier includes unlimited repositories. + "included_storage_bytes": 0, # Optional. 
The amount of + storage included in the subscription tier in bytes. + "monthly_price_in_cents": 0, # Optional. The monthly cost of + the subscription tier in cents. + "name": "str", # Optional. The name of the subscription + tier. + "slug": "str", # Optional. The slug identifier of the + subscription tier. + "storage_overage_price_in_cents": 0 # Optional. The price + paid in cents per GiB for additional storage beyond what is included in + the subscription plan. + }, + "updated_at": "2020-02-20 00:00:00" # Optional. The time at which + the subscription was last updated. + } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_registry_get_subscription_request( + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) 
+ response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def update_subscription( + self, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Update Subscription Tier. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + After creating your registry, you can switch to a different + subscription tier to better suit your needs. To do this, send a POST request + to ``/v2/registry/subscription``. + + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "tier_slug": "str" # Optional. The slug of the subscription tier to sign up + for. Known values are: "starter", "basic", and "professional". + } + + # response body for status code(s): 200 + response == { + "subscription": { + "created_at": "2020-02-20 00:00:00", # Optional. The time at which + the subscription was created. + "tier": { + "allow_storage_overage": bool, # Optional. A boolean + indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB + used. + "included_bandwidth_bytes": 0, # Optional. The amount of + outbound data transfer included in the subscription tier in bytes. + "included_repositories": 0, # Optional. 
The number of + repositories included in the subscription tier. ``0`` indicates that the + subscription tier includes unlimited repositories. + "included_storage_bytes": 0, # Optional. The amount of + storage included in the subscription tier in bytes. + "monthly_price_in_cents": 0, # Optional. The monthly cost of + the subscription tier in cents. + "name": "str", # Optional. The name of the subscription + tier. + "slug": "str", # Optional. The slug identifier of the + subscription tier. + "storage_overage_price_in_cents": 0 # Optional. The price + paid in cents per GiB for additional storage beyond what is included in + the subscription plan. + }, + "updated_at": "2020-02-20 00:00:00" # Optional. The time at which + the subscription was last updated. + } + } + # response body for status code(s): 412 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def update_subscription( + self, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Update Subscription Tier. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + After creating your registry, you can switch to a different + subscription tier to better suit your needs. To do this, send a POST request + to ``/v2/registry/subscription``. + + :param body: Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "subscription": { + "created_at": "2020-02-20 00:00:00", # Optional. The time at which + the subscription was created. + "tier": { + "allow_storage_overage": bool, # Optional. A boolean + indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB + used. + "included_bandwidth_bytes": 0, # Optional. The amount of + outbound data transfer included in the subscription tier in bytes. + "included_repositories": 0, # Optional. The number of + repositories included in the subscription tier. ``0`` indicates that the + subscription tier includes unlimited repositories. + "included_storage_bytes": 0, # Optional. The amount of + storage included in the subscription tier in bytes. + "monthly_price_in_cents": 0, # Optional. The monthly cost of + the subscription tier in cents. + "name": "str", # Optional. The name of the subscription + tier. + "slug": "str", # Optional. The slug identifier of the + subscription tier. + "storage_overage_price_in_cents": 0 # Optional. The price + paid in cents per GiB for additional storage beyond what is included in + the subscription plan. + }, + "updated_at": "2020-02-20 00:00:00" # Optional. The time at which + the subscription was last updated. + } + } + # response body for status code(s): 412 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def update_subscription( + self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update Subscription Tier. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + After creating your registry, you can switch to a different + subscription tier to better suit your needs. To do this, send a POST request + to ``/v2/registry/subscription``. + + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "tier_slug": "str" # Optional. The slug of the subscription tier to sign up + for. Known values are: "starter", "basic", and "professional". + } + + # response body for status code(s): 200 + response == { + "subscription": { + "created_at": "2020-02-20 00:00:00", # Optional. The time at which + the subscription was created. + "tier": { + "allow_storage_overage": bool, # Optional. A boolean + indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB + used. + "included_bandwidth_bytes": 0, # Optional. The amount of + outbound data transfer included in the subscription tier in bytes. + "included_repositories": 0, # Optional. The number of + repositories included in the subscription tier. ``0`` indicates that the + subscription tier includes unlimited repositories. + "included_storage_bytes": 0, # Optional. The amount of + storage included in the subscription tier in bytes. + "monthly_price_in_cents": 0, # Optional. 
The monthly cost of + the subscription tier in cents. + "name": "str", # Optional. The name of the subscription + tier. + "slug": "str", # Optional. The slug identifier of the + subscription tier. + "storage_overage_price_in_cents": 0 # Optional. The price + paid in cents per GiB for additional storage beyond what is included in + the subscription plan. + }, + "updated_at": "2020-02-20 00:00:00" # Optional. The time at which + the subscription was last updated. + } + } + # response body for status code(s): 412 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_registry_update_subscription_request( + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 412]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: 
+ deserialized = response.json() + else: + deserialized = None + + if response.status_code == 412: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def get_docker_credentials( + self, *, expiry_seconds: int = 0, read_write: bool = False, **kwargs: Any + ) -> JSON: + """Get Docker Credentials for Container Registry. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + In order to access your container registry with the Docker client or from a + Kubernetes cluster, you will need to configure authentication. The necessary + JSON configuration can be retrieved by sending a GET request to + ``/v2/registry/docker-credentials``. + + The response will be in the format of a Docker ``config.json`` file. To use the + config in your Kubernetes cluster, create a Secret with: + + .. code-block:: + + kubectl create secret generic docr \\ + --from-file=.dockerconfigjson=config.json \\ + --type=kubernetes.io/dockerconfigjson + + + By default, the returned credentials have read-only access to your registry + and cannot be used to push images. This is appropriate for most Kubernetes + clusters. To retrieve read/write credentials, suitable for use with the Docker + client or in a CI system, read_write may be provided as query parameter. For + example: ``/v2/registry/docker-credentials?read_write=true`` + + By default, the returned credentials will not expire. 
To retrieve credentials + with an expiry set, expiry_seconds may be provided as a query parameter. For + example: ``/v2/registry/docker-credentials?expiry_seconds=3600`` will return + credentials that expire after one hour. + + :keyword expiry_seconds: The duration in seconds that the returned registry credentials will be + valid. If not set or 0, the credentials will not expire. Default value is 0. + :paramtype expiry_seconds: int + :keyword read_write: By default, the registry credentials allow for read-only access. Set this + query parameter to ``true`` to obtain read-write credentials. Default value is False. + :paramtype read_write: bool + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "auths": { + "registry.digitalocean.com": { + "auth": "str" # Optional. A base64 encoded string containing + credentials for the container registry. + } + } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_registry_get_docker_credentials_request( + expiry_seconds=expiry_seconds, + read_write=read_write, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @overload + def validate_name( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Validate a Container Registry Name. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + To validate that a container registry name is available for use, send a POST + request to ``/v2/registry/validate-name``. + + If the name is both formatted correctly and available, the response code will + be 204 and contain no body. If the name is already in use, the response will + be a 409 Conflict. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str" # A globally unique name for the container registry. 
Must be + lowercase and be composed only of numbers, letters and ``-``"" , up to a limit of + 63 characters. Required. + } + + # response body for status code(s): 409 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def validate_name( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Validate a Container Registry Name. + + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + + To validate that a container registry name is available for use, send a POST + request to ``/v2/registry/validate-name``. + + If the name is both formatted correctly and available, the response code will + be 204 and contain no body. If the name is already in use, the response will + be a 409 Conflict. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 409 response == { - "registry": { - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the registry - was created. - "name": "str", # Optional. 
A globally unique name for the container - registry. Must be lowercase and be composed only of numbers, letters and - ``-``"" , up to a limit of 63 characters. - "region": "str", # Optional. Slug of the region where registry data - is stored. - "storage_usage_bytes": 0, # Optional. The amount of storage used in - the registry in bytes. - "storage_usage_bytes_updated_at": "2020-02-20 00:00:00", # Optional. - The time at which the storage usage was updated. Storage usage is calculated - asynchronously, and may not immediately reflect pushes to the registry. - "subscription": { - "created_at": "2020-02-20 00:00:00", # Optional. The time at - which the subscription was created. - "tier": { - "allow_storage_overage": bool, # Optional. A boolean - indicating whether the subscription tier supports additional storage - above what is included in the base plan at an additional cost per GiB - used. - "included_bandwidth_bytes": 0, # Optional. The - amount of outbound data transfer included in the subscription tier in - bytes. - "included_repositories": 0, # Optional. The number - of repositories included in the subscription tier. ``0`` indicates - that the subscription tier includes unlimited repositories. - "included_storage_bytes": 0, # Optional. The amount - of storage included in the subscription tier in bytes. - "monthly_price_in_cents": 0, # Optional. The monthly - cost of the subscription tier in cents. - "name": "str", # Optional. The name of the - subscription tier. - "slug": "str", # Optional. The slug identifier of - the subscription tier. - "storage_overage_price_in_cents": 0 # Optional. The - price paid in cents per GiB for additional storage beyond what is - included in the subscription plan. - }, - "updated_at": "2020-02-20 00:00:00" # Optional. The time at - which the subscription was last updated. - } - } + "id": "str", # A short identifier corresponding to the HTTP status code + returned. 
For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. } """ @distributed_trace - def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: + def validate_name( + self, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Create Container Registry. + """Validate a Container Registry Name. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To create your container registry, send a POST request to ``/v2/registry``. + To validate that a container registry name is available for use, send a POST + request to ``/v2/registry/validate-name``. - The ``name`` becomes part of the URL for images stored in the registry. For - example, if your registry is called ``example``\\ , an image in it will have the - URL ``registry.digitalocean.com/example/image:tag``. + If the name is both formatted correctly and available, the response code will + be 204 and contain no body. If the name is already in use, the response will + be a 409 Conflict. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -182598,63 +193368,21 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # JSON input template you can fill out and use as your body input. body = { - "name": "str", # A globally unique name for the container registry. Must be + "name": "str" # A globally unique name for the container registry. 
Must be lowercase and be composed only of numbers, letters and ``-``"" , up to a limit of 63 characters. Required. - "subscription_tier_slug": "str", # The slug of the subscription tier to sign - up for. Valid values can be retrieved using the options endpoint. Required. Known - values are: "starter", "basic", and "professional". - "region": "str" # Optional. Slug of the region where registry data is - stored. When not provided, a region will be selected. Known values are: "nyc3", - "sfo3", "ams3", "sgp1", and "fra1". } - # response body for status code(s): 201 + # response body for status code(s): 409 response == { - "registry": { - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the registry - was created. - "name": "str", # Optional. A globally unique name for the container - registry. Must be lowercase and be composed only of numbers, letters and - ``-``"" , up to a limit of 63 characters. - "region": "str", # Optional. Slug of the region where registry data - is stored. - "storage_usage_bytes": 0, # Optional. The amount of storage used in - the registry in bytes. - "storage_usage_bytes_updated_at": "2020-02-20 00:00:00", # Optional. - The time at which the storage usage was updated. Storage usage is calculated - asynchronously, and may not immediately reflect pushes to the registry. - "subscription": { - "created_at": "2020-02-20 00:00:00", # Optional. The time at - which the subscription was created. - "tier": { - "allow_storage_overage": bool, # Optional. A boolean - indicating whether the subscription tier supports additional storage - above what is included in the base plan at an additional cost per GiB - used. - "included_bandwidth_bytes": 0, # Optional. The - amount of outbound data transfer included in the subscription tier in - bytes. - "included_repositories": 0, # Optional. The number - of repositories included in the subscription tier. 
``0`` indicates - that the subscription tier includes unlimited repositories. - "included_storage_bytes": 0, # Optional. The amount - of storage included in the subscription tier in bytes. - "monthly_price_in_cents": 0, # Optional. The monthly - cost of the subscription tier in cents. - "name": "str", # Optional. The name of the - subscription tier. - "slug": "str", # Optional. The slug identifier of - the subscription tier. - "storage_overage_price_in_cents": 0 # Optional. The - price paid in cents per GiB for additional storage beyond what is - included in the subscription plan. - }, - "updated_at": "2020-02-20 00:00:00" # Optional. The time at - which the subscription was last updated. - } - } + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
} """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -182676,7 +193404,7 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -182686,7 +193414,7 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: else: _json = body - _request = build_registry_create_request( + _request = build_registry_validate_name_request( content_type=content_type, json=_json, content=_content, @@ -182704,54 +193432,109 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [201]: + if response.status_code not in [204, 409]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if 
response.status_code == 409: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @distributed_trace - def delete(self, **kwargs: Any) -> Optional[JSON]: + def list_repositories( + self, registry_name: str, *, per_page: int = 20, page: int = 1, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Delete Container Registry. + """List All Container Registry Repositories. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To delete your container registry, destroying all container image - data stored in it, send a DELETE request to ``/v2/registry``. + This endpoint has been deprecated in favor of the *List All Container Registry Repositories + [V2]* endpoint. - This operation is not compatible with multiple registries in a DO account. You should use - ``/v2/registries/{registry_name}`` instead. + To list all repositories in your container registry, send a GET + request to ``/v2/registry/$REGISTRY_NAME/repositories``. - :return: JSON object or None - :rtype: JSON or None + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. 
+ :paramtype page: int + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 404, 412 + # response body for status code(s): 200 + response == { + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "links": { + "pages": {} + }, + "repositories": [ + { + "latest_tag": { + "compressed_size_bytes": 0, # Optional. The + compressed size of the tag in bytes. + "manifest_digest": "str", # Optional. The digest of + the manifest associated with the tag. + "registry_name": "str", # Optional. The name of the + container registry. + "repository": "str", # Optional. The name of the + repository. + "size_bytes": 0, # Optional. The uncompressed size + of the tag in bytes (this size is calculated asynchronously so it may + not be immediately available). + "tag": "str", # Optional. The name of the tag. + "updated_at": "2020-02-20 00:00:00" # Optional. The + time the tag was last updated. + }, + "name": "str", # Optional. The name of the repository. + "registry_name": "str", # Optional. The name of the + container registry. + "tag_count": 0 # Optional. The number of tags in the + repository. + } + ] + } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -182779,9 +193562,12 @@ def delete(self, **kwargs: Any) -> Optional[JSON]: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_delete_request( + _request = build_registry_list_repositories_request( + registry_name=registry_name, + per_page=per_page, + page=page, headers=_headers, params=_params, ) @@ -182796,26 +193582,14 @@ def delete(self, **kwargs: Any) -> Optional[JSON]: response = pipeline_response.http_response - if response.status_code not in [204, 404, 412]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -182831,7 +193605,7 @@ def delete(self, **kwargs: Any) -> Optional[JSON]: else: deserialized = None - if response.status_code == 412: + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -182848,21 +193622,38 @@ def delete(self, **kwargs: Any) -> Optional[JSON]: deserialized = None if cls: - return cls(pipeline_response, deserialized, 
response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_subscription(self, **kwargs: Any) -> JSON: + def list_repositories_v2( + self, + registry_name: str, + *, + per_page: int = 20, + page: int = 1, + page_token: Optional[str] = None, + **kwargs: Any, + ) -> JSON: # pylint: disable=line-too-long - """Get Subscription. + """List All Container Registry Repositories (V2). **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - A subscription is automatically created when you configure your - container registry. To get information about your subscription, send a GET - request to ``/v2/registry/subscription``. + To list all repositories in your container registry, send a GET + request to ``/v2/registry/$REGISTRY_NAME/repositoriesV2``. + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Ignored when 'page_token' is + provided. Default value is 1. + :paramtype page: int + :keyword page_token: Token to retrieve of the next or previous set of results more quickly than + using 'page'. Default value is None. + :paramtype page_token: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -182872,34 +193663,60 @@ def get_subscription(self, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "subscription": { - "created_at": "2020-02-20 00:00:00", # Optional. The time at which - the subscription was created. - "tier": { - "allow_storage_overage": bool, # Optional. 
A boolean - indicating whether the subscription tier supports additional storage - above what is included in the base plan at an additional cost per GiB - used. - "included_bandwidth_bytes": 0, # Optional. The amount of - outbound data transfer included in the subscription tier in bytes. - "included_repositories": 0, # Optional. The number of - repositories included in the subscription tier. ``0`` indicates that the - subscription tier includes unlimited repositories. - "included_storage_bytes": 0, # Optional. The amount of - storage included in the subscription tier in bytes. - "monthly_price_in_cents": 0, # Optional. The monthly cost of - the subscription tier in cents. - "name": "str", # Optional. The name of the subscription - tier. - "slug": "str", # Optional. The slug identifier of the - subscription tier. - "storage_overage_price_in_cents": 0 # Optional. The price - paid in cents per GiB for additional storage beyond what is included in - the subscription plan. - }, - "updated_at": "2020-02-20 00:00:00" # Optional. The time at which - the subscription was last updated. - } + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "links": { + "pages": {} + }, + "repositories": [ + { + "latest_manifest": { + "blobs": [ + { + "compressed_size_bytes": 0, # + Optional. The compressed size of the blob in bytes. + "digest": "str" # Optional. The + digest of the blob. + } + ], + "compressed_size_bytes": 0, # Optional. The + compressed size of the manifest in bytes. + "digest": "str", # Optional. The manifest digest. + "registry_name": "str", # Optional. The name of the + container registry. + "repository": "str", # Optional. The name of the + repository. + "size_bytes": 0, # Optional. The uncompressed size + of the manifest in bytes (this size is calculated asynchronously so + it may not be immediately available). + "tags": [ + "str" # Optional. All tags associated with + this manifest. 
+ ], + "updated_at": "2020-02-20 00:00:00" # Optional. The + time the manifest was last updated. + }, + "manifest_count": 0, # Optional. The number of manifests in + the repository. + "name": "str", # Optional. The name of the repository. + "registry_name": "str", # Optional. The name of the + container registry. + "tag_count": 0 # Optional. The number of tags in the + repository. + } + ] + } + # response body for status code(s): 400, 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -182920,7 +193737,11 @@ def get_subscription(self, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_get_subscription_request( + _request = build_registry_list_repositories_v2_request( + registry_name=registry_name, + per_page=per_page, + page=page, + page_token=page_token, headers=_headers, params=_params, ) @@ -182935,55 +193756,98 @@ def get_subscription(self, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [200, 400, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - 
response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 400: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore return cast(JSON, deserialized) # type: ignore - @overload - def update_subscription( + @distributed_trace + def list_repository_tags( self, - body: Optional[JSON] = None, + 
registry_name: str, + repository_name: str, *, - content_type: str = "application/json", + per_page: int = 20, + page: int = 1, **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Update Subscription Tier. + """List All Container Registry Repository Tags. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - After creating your registry, you can switch to a different - subscription tier to better suit your needs. To do this, send a POST request - to ``/v2/registry/subscription``. + To list all tags in your container registry repository, send a GET + request to ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/tags``. - :param body: Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str + Note that if your repository name contains ``/`` characters, it must be + URL-encoded in the request URL. For example, to list tags for + ``registry.digitalocean.com/example/my/repo``\\ , the path would be + ``/v2/registry/example/repositories/my%2Frepo/tags``. + + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param repository_name: The name of a container registry repository. If the name contains ``/`` + characters, they must be URL-encoded, e.g. ``%2F``. Required. + :type repository_name: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -182991,44 +193855,33 @@ def update_subscription( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "tier_slug": "str" # Optional. The slug of the subscription tier to sign up - for. 
Known values are: "starter", "basic", and "professional". - } - # response body for status code(s): 200 response == { - "subscription": { - "created_at": "2020-02-20 00:00:00", # Optional. The time at which - the subscription was created. - "tier": { - "allow_storage_overage": bool, # Optional. A boolean - indicating whether the subscription tier supports additional storage - above what is included in the base plan at an additional cost per GiB - used. - "included_bandwidth_bytes": 0, # Optional. The amount of - outbound data transfer included in the subscription tier in bytes. - "included_repositories": 0, # Optional. The number of - repositories included in the subscription tier. ``0`` indicates that the - subscription tier includes unlimited repositories. - "included_storage_bytes": 0, # Optional. The amount of - storage included in the subscription tier in bytes. - "monthly_price_in_cents": 0, # Optional. The monthly cost of - the subscription tier in cents. - "name": "str", # Optional. The name of the subscription - tier. - "slug": "str", # Optional. The slug identifier of the - subscription tier. - "storage_overage_price_in_cents": 0 # Optional. The price - paid in cents per GiB for additional storage beyond what is included in - the subscription plan. - }, - "updated_at": "2020-02-20 00:00:00" # Optional. The time at which - the subscription was last updated. - } + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "links": { + "pages": {} + }, + "tags": [ + { + "compressed_size_bytes": 0, # Optional. The compressed size + of the tag in bytes. + "manifest_digest": "str", # Optional. The digest of the + manifest associated with the tag. + "registry_name": "str", # Optional. The name of the + container registry. + "repository": "str", # Optional. The name of the repository. + "size_bytes": 0, # Optional. 
The uncompressed size of the + tag in bytes (this size is calculated asynchronously so it may not be + immediately available). + "tag": "str", # Optional. The name of the tag. + "updated_at": "2020-02-20 00:00:00" # Optional. The time the + tag was last updated. + } + ] } - # response body for status code(s): 412 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -183040,68 +193893,126 @@ def update_subscription( tickets to help identify the issue. } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @overload - def update_subscription( + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_registry_list_repository_tags_request( + registry_name=registry_name, + repository_name=repository_name, + per_page=per_page, + page=page, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + 
response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def delete_repository_tag( self, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", + registry_name: str, + repository_name: str, + repository_tag: str, **kwargs: Any, - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Update Subscription Tier. + """Delete Container Registry Repository Tag. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - After creating your registry, you can switch to a different - subscription tier to better suit your needs. To do this, send a POST request - to ``/v2/registry/subscription``. + To delete a container repository tag, send a DELETE request to + ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/tags/$TAG``. - :param body: Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. 
- Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON + Note that if your repository name contains ``/`` characters, it must be + URL-encoded in the request URL. For example, to delete + ``registry.digitalocean.com/example/my/repo:mytag``\\ , the path would be + ``/v2/registry/example/repositories/my%2Frepo/tags/mytag``. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. + + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param repository_name: The name of a container registry repository. If the name contains ``/`` + characters, they must be URL-encoded, e.g. ``%2F``. Required. + :type repository_name: str + :param repository_tag: The name of a container registry repository tag. Required. + :type repository_tag: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "subscription": { - "created_at": "2020-02-20 00:00:00", # Optional. The time at which - the subscription was created. - "tier": { - "allow_storage_overage": bool, # Optional. A boolean - indicating whether the subscription tier supports additional storage - above what is included in the base plan at an additional cost per GiB - used. - "included_bandwidth_bytes": 0, # Optional. The amount of - outbound data transfer included in the subscription tier in bytes. - "included_repositories": 0, # Optional. The number of - repositories included in the subscription tier. ``0`` indicates that the - subscription tier includes unlimited repositories. - "included_storage_bytes": 0, # Optional. The amount of - storage included in the subscription tier in bytes. - "monthly_price_in_cents": 0, # Optional. The monthly cost of - the subscription tier in cents. 
- "name": "str", # Optional. The name of the subscription - tier. - "slug": "str", # Optional. The slug identifier of the - subscription tier. - "storage_overage_price_in_cents": 0 # Optional. The price - paid in cents per GiB for additional storage beyond what is included in - the subscription plan. - }, - "updated_at": "2020-02-20 00:00:00" # Optional. The time at which - the subscription was last updated. - } - } - # response body for status code(s): 412 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -183113,22 +194024,114 @@ def update_subscription( tickets to help identify the issue. } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + + _request = build_registry_delete_repository_tag_request( + registry_name=registry_name, + repository_name=repository_name, + repository_tag=repository_tag, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise 
HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore @distributed_trace - def update_subscription( - self, body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any + def list_repository_manifests( + self, + registry_name: str, + repository_name: str, + *, + per_page: int = 20, + page: int = 1, + **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Update Subscription Tier. + """List All Container Registry Repository Manifests. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - After creating your registry, you can switch to a different - subscription tier to better suit your needs. To do this, send a POST request - to ``/v2/registry/subscription``. + To list all manifests in your container registry repository, send a GET + request to ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/digests``. - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. 
- :type body: JSON or IO[bytes] + Note that if your repository name contains ``/`` characters, it must be + URL-encoded in the request URL. For example, to list manifests for + ``registry.digitalocean.com/example/my/repo``\\ , the path would be + ``/v2/registry/example/repositories/my%2Frepo/digests``. + + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param repository_name: The name of a container registry repository. If the name contains ``/`` + characters, they must be URL-encoded, e.g. ``%2F``. Required. + :type repository_name: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -183136,44 +194139,43 @@ def update_subscription( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = { - "tier_slug": "str" # Optional. The slug of the subscription tier to sign up - for. Known values are: "starter", "basic", and "professional". - } - # response body for status code(s): 200 response == { - "subscription": { - "created_at": "2020-02-20 00:00:00", # Optional. The time at which - the subscription was created. - "tier": { - "allow_storage_overage": bool, # Optional. A boolean - indicating whether the subscription tier supports additional storage - above what is included in the base plan at an additional cost per GiB - used. - "included_bandwidth_bytes": 0, # Optional. The amount of - outbound data transfer included in the subscription tier in bytes. - "included_repositories": 0, # Optional. The number of - repositories included in the subscription tier. ``0`` indicates that the - subscription tier includes unlimited repositories. - "included_storage_bytes": 0, # Optional. 
The amount of - storage included in the subscription tier in bytes. - "monthly_price_in_cents": 0, # Optional. The monthly cost of - the subscription tier in cents. - "name": "str", # Optional. The name of the subscription - tier. - "slug": "str", # Optional. The slug identifier of the - subscription tier. - "storage_overage_price_in_cents": 0 # Optional. The price - paid in cents per GiB for additional storage beyond what is included in - the subscription plan. - }, - "updated_at": "2020-02-20 00:00:00" # Optional. The time at which - the subscription was last updated. - } + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "links": { + "pages": {} + }, + "manifests": [ + { + "blobs": [ + { + "compressed_size_bytes": 0, # Optional. The + compressed size of the blob in bytes. + "digest": "str" # Optional. The digest of + the blob. + } + ], + "compressed_size_bytes": 0, # Optional. The compressed size + of the manifest in bytes. + "digest": "str", # Optional. The manifest digest. + "registry_name": "str", # Optional. The name of the + container registry. + "repository": "str", # Optional. The name of the repository. + "size_bytes": 0, # Optional. The uncompressed size of the + manifest in bytes (this size is calculated asynchronously so it may not + be immediately available). + "tags": [ + "str" # Optional. All tags associated with this + manifest. + ], + "updated_at": "2020-02-20 00:00:00" # Optional. The time the + manifest was last updated. + } + ] } - # response body for status code(s): 412 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -183198,29 +194200,16 @@ def update_subscription( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[JSON] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None - - _request = build_registry_update_subscription_request( - content_type=content_type, - json=_json, - content=_content, + _request = build_registry_list_repository_manifests_request( + registry_name=registry_name, + repository_name=repository_name, + per_page=per_page, + page=page, headers=_headers, params=_params, ) @@ -183235,7 +194224,7 @@ def update_subscription( response = pipeline_response.http_response - if response.status_code not in [200, 412]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -183258,7 +194247,7 @@ def update_subscription( else: deserialized = None - if response.status_code == 412: + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -183280,60 +194269,53 @@ def update_subscription( return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_docker_credentials( - self, *, expiry_seconds: int = 0, read_write: bool = False, **kwargs: Any - ) -> JSON: - """Get Docker Credentials for Container Registry. 
+ def delete_repository_manifest( + self, + registry_name: str, + repository_name: str, + manifest_digest: str, + **kwargs: Any, + ) -> Optional[JSON]: + # pylint: disable=line-too-long + """Delete Container Registry Repository Manifest. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - In order to access your container registry with the Docker client or from a - Kubernetes cluster, you will need to configure authentication. The necessary - JSON configuration can be retrieved by sending a GET request to - ``/v2/registry/docker-credentials``. - - The response will be in the format of a Docker ``config.json`` file. To use the - config in your Kubernetes cluster, create a Secret with: - - .. code-block:: - - kubectl create secret generic docr \\ - --from-file=.dockerconfigjson=config.json \\ - --type=kubernetes.io/dockerconfigjson - + To delete a container repository manifest by digest, send a DELETE request to + ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/digests/$MANIFEST_DIGEST``. - By default, the returned credentials have read-only access to your registry - and cannot be used to push images. This is appropriate for most Kubernetes - clusters. To retrieve read/write credentials, suitable for use with the Docker - client or in a CI system, read_write may be provided as query parameter. For - example: ``/v2/registry/docker-credentials?read_write=true`` + Note that if your repository name contains ``/`` characters, it must be + URL-encoded in the request URL. For example, to delete + ``registry.digitalocean.com/example/my/repo@sha256:abcd``\\ , the path would be + ``/v2/registry/example/repositories/my%2Frepo/digests/sha256:abcd``. - By default, the returned credentials will not expire. To retrieve credentials - with an expiry set, expiry_seconds may be provided as a query parameter. For - example: ``/v2/registry/docker-credentials?expiry_seconds=3600`` will return - credentials that expire after one hour. 
+ A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. - :keyword expiry_seconds: The duration in seconds that the returned registry credentials will be - valid. If not set or 0, the credentials will not expire. Default value is 0. - :paramtype expiry_seconds: int - :keyword read_write: By default, the registry credentials allow for read-only access. Set this - query parameter to ``true`` to obtain read-write credentials. Default value is False. - :paramtype read_write: bool - :return: JSON object - :rtype: JSON + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param repository_name: The name of a container registry repository. If the name contains ``/`` + characters, they must be URL-encoded, e.g. ``%2F``. Required. + :type repository_name: str + :param manifest_digest: The manifest digest of a container registry repository tag. Required. + :type manifest_digest: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 + # response body for status code(s): 404 response == { - "auths": { - "registry.digitalocean.com": { - "auth": "str" # Optional. A base64 encoded string containing - credentials for the container registry. - } - } + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
} """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -183352,11 +194334,12 @@ def get_docker_credentials( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_registry_get_docker_credentials_request( - expiry_seconds=expiry_seconds, - read_write=read_write, + _request = build_registry_delete_repository_manifest_request( + registry_name=registry_name, + repository_name=repository_name, + manifest_digest=manifest_digest, headers=_headers, params=_params, ) @@ -183371,56 +194354,93 @@ def get_docker_credentials( response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = 
self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @overload - def validate_name( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: + def run_garbage_collection( + self, + registry_name: str, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: # pylint: disable=line-too-long - """Validate a Container Registry Name. + """Start Garbage Collection. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To validate that a container registry name is available for use, send a POST - request to ``/v2/registry/validate-name``. + Garbage collection enables users to clear out unreferenced blobs (layer & + manifest data) after deleting one or more manifests from a repository. If + there are no unreferenced blobs resulting from the deletion of one or more + manifests, garbage collection is effectively a noop. + `See here for more information + `_ + about how and why you should clean up your container registry periodically. - If the name is both formatted correctly and available, the response code will - be 204 and contain no body. If the name is already in use, the response will - be a 409 Conflict. + To request a garbage collection run on your registry, send a POST request to + ``/v2/registry/$REGISTRY_NAME/garbage-collection``. 
This will initiate the + following sequence of events on your registry. - :param body: Required. + + * Set the registry to read-only mode, meaning no further write-scoped + JWTs will be issued to registry clients. Existing write-scoped JWTs will + continue to work until they expire which can take up to 15 minutes. + * Wait until all existing write-scoped JWTs have expired. + * Scan all registry manifests to determine which blobs are unreferenced. + * Delete all unreferenced blobs from the registry. + * Record the number of blobs deleted and bytes freed, mark the garbage + collection status as ``success``. + * Remove the read-only mode restriction from the registry, meaning write-scoped + JWTs will once again be issued to registry clients. + + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param body: Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -183428,12 +194448,33 @@ def validate_name( # JSON input template you can fill out and use as your body input. body = { - "name": "str" # A globally unique name for the container registry. Must be - lowercase and be composed only of numbers, letters and ``-``"" , up to a limit of - 63 characters. Required. + "type": "str" # Optional. Type of the garbage collection to run against this + registry. Known values are: "untagged manifests only", "unreferenced blobs only", + and "untagged manifests and unreferenced blobs". } - # response body for status code(s): 409 + # response body for status code(s): 201 + response == { + "garbage_collection": { + "blobs_deleted": 0, # Optional. The number of blobs deleted as a + result of this garbage collection. 
+ "created_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was created. + "freed_bytes": 0, # Optional. The number of bytes freed as a result + of this garbage collection. + "registry_name": "str", # Optional. The name of the container + registry. + "status": "str", # Optional. The current status of this garbage + collection. Known values are: "requested", "waiting for write JWTs to + expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", + "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of the + garbage collection. + } + } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -183447,34 +194488,79 @@ def validate_name( """ @overload - def validate_name( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: + def run_garbage_collection( + self, + registry_name: str, + body: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: # pylint: disable=line-too-long - """Validate a Container Registry Name. + """Start Garbage Collection. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To validate that a container registry name is available for use, send a POST - request to ``/v2/registry/validate-name``. + Garbage collection enables users to clear out unreferenced blobs (layer & + manifest data) after deleting one or more manifests from a repository. If + there are no unreferenced blobs resulting from the deletion of one or more + manifests, garbage collection is effectively a noop. 
+ `See here for more information + `_ + about how and why you should clean up your container registry periodically. - If the name is both formatted correctly and available, the response code will - be 204 and contain no body. If the name is already in use, the response will - be a 409 Conflict. + To request a garbage collection run on your registry, send a POST request to + ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the + following sequence of events on your registry. - :param body: Required. + + * Set the registry to read-only mode, meaning no further write-scoped + JWTs will be issued to registry clients. Existing write-scoped JWTs will + continue to work until they expire which can take up to 15 minutes. + * Wait until all existing write-scoped JWTs have expired. + * Scan all registry manifests to determine which blobs are unreferenced. + * Delete all unreferenced blobs from the registry. + * Record the number of blobs deleted and bytes freed, mark the garbage + collection status as ``success``. + * Remove the read-only mode restriction from the registry, meaning write-scoped + JWTs will once again be issued to registry clients. + + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param body: Default value is None. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 409 + # response body for status code(s): 201 + response == { + "garbage_collection": { + "blobs_deleted": 0, # Optional. The number of blobs deleted as a + result of this garbage collection. + "created_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was created. 
+ "freed_bytes": 0, # Optional. The number of bytes freed as a result + of this garbage collection. + "registry_name": "str", # Optional. The name of the container + registry. + "status": "str", # Optional. The current status of this garbage + collection. Known values are: "requested", "waiting for write JWTs to + expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", + "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of the + garbage collection. + } + } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -183488,25 +194574,47 @@ def validate_name( """ @distributed_trace - def validate_name( - self, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> Optional[JSON]: + def run_garbage_collection( + self, + registry_name: str, + body: Optional[Union[JSON, IO[bytes]]] = None, + **kwargs: Any, + ) -> JSON: # pylint: disable=line-too-long - """Validate a Container Registry Name. + """Start Garbage Collection. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To validate that a container registry name is available for use, send a POST - request to ``/v2/registry/validate-name``. + Garbage collection enables users to clear out unreferenced blobs (layer & + manifest data) after deleting one or more manifests from a repository. If + there are no unreferenced blobs resulting from the deletion of one or more + manifests, garbage collection is effectively a noop. + `See here for more information + `_ + about how and why you should clean up your container registry periodically. - If the name is both formatted correctly and available, the response code will - be 204 and contain no body. 
If the name is already in use, the response will - be a 409 Conflict. + To request a garbage collection run on your registry, send a POST request to + ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the + following sequence of events on your registry. - :param body: Is either a JSON type or a IO[bytes] type. Required. + + * Set the registry to read-only mode, meaning no further write-scoped + JWTs will be issued to registry clients. Existing write-scoped JWTs will + continue to work until they expire which can take up to 15 minutes. + * Wait until all existing write-scoped JWTs have expired. + * Scan all registry manifests to determine which blobs are unreferenced. + * Delete all unreferenced blobs from the registry. + * Record the number of blobs deleted and bytes freed, mark the garbage + collection status as ``success``. + * Remove the read-only mode restriction from the registry, meaning write-scoped + JWTs will once again be issued to registry clients. + + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param body: Is either a JSON type or a IO[bytes] type. Default value is None. :type body: JSON or IO[bytes] - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -183514,12 +194622,33 @@ def validate_name( # JSON input template you can fill out and use as your body input. body = { - "name": "str" # A globally unique name for the container registry. Must be - lowercase and be composed only of numbers, letters and ``-``"" , up to a limit of - 63 characters. Required. + "type": "str" # Optional. Type of the garbage collection to run against this + registry. Known values are: "untagged manifests only", "unreferenced blobs only", + and "untagged manifests and unreferenced blobs". 
} - # response body for status code(s): 409 + # response body for status code(s): 201 + response == { + "garbage_collection": { + "blobs_deleted": 0, # Optional. The number of blobs deleted as a + result of this garbage collection. + "created_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was created. + "freed_bytes": 0, # Optional. The number of bytes freed as a result + of this garbage collection. + "registry_name": "str", # Optional. The name of the container + registry. + "status": "str", # Optional. The current status of this garbage + collection. Known values are: "requested", "waiting for write JWTs to + expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", + "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of the + garbage collection. + } + } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -183550,7 +194679,7 @@ def validate_name( content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -183558,9 +194687,13 @@ def validate_name( if isinstance(body, (IOBase, bytes)): _content = body else: - _json = body + if body is not None: + _json = body + else: + _json = None - _request = build_registry_validate_name_request( + _request = build_registry_run_garbage_collection_request( + registry_name=registry_name, content_type=content_type, json=_json, content=_content, @@ -183578,15 +194711,14 @@ def validate_name( response = pipeline_response.http_response - if response.status_code not in [204, 409]: + if response.status_code not in [201, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -183597,7 +194729,12 @@ def validate_name( "int", response.headers.get("ratelimit-reset") ) - if response.status_code == 409: + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -183614,31 +194751,22 @@ def validate_name( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # 
type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace - def list_repositories( - self, registry_name: str, *, per_page: int = 20, page: int = 1, **kwargs: Any - ) -> JSON: + def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List All Container Registry Repositories. + """Get Active Garbage Collection. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - This endpoint has been deprecated in favor of the *List All Container Registry Repositories - [V2]* endpoint. - - To list all repositories in your container registry, send a GET - request to ``/v2/registry/$REGISTRY_NAME/repositories``. + To get information about the currently-active garbage collection + for a registry, send a GET request to ``/v2/registry/$REGISTRY_NAME/garbage-collection``. :param registry_name: The name of a container registry. Required. :type registry_name: str - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -183648,37 +194776,24 @@ def list_repositories( # response body for status code(s): 200 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "links": { - "pages": {} - }, - "repositories": [ - { - "latest_tag": { - "compressed_size_bytes": 0, # Optional. The - compressed size of the tag in bytes. - "manifest_digest": "str", # Optional. The digest of - the manifest associated with the tag. - "registry_name": "str", # Optional. The name of the - container registry. - "repository": "str", # Optional. The name of the - repository. - "size_bytes": 0, # Optional. 
The uncompressed size - of the tag in bytes (this size is calculated asynchronously so it may - not be immediately available). - "tag": "str", # Optional. The name of the tag. - "updated_at": "2020-02-20 00:00:00" # Optional. The - time the tag was last updated. - }, - "name": "str", # Optional. The name of the repository. - "registry_name": "str", # Optional. The name of the - container registry. - "tag_count": 0 # Optional. The number of tags in the - repository. - } - ] + "garbage_collection": { + "blobs_deleted": 0, # Optional. The number of blobs deleted as a + result of this garbage collection. + "created_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was created. + "freed_bytes": 0, # Optional. The number of bytes freed as a result + of this garbage collection. + "registry_name": "str", # Optional. The name of the container + registry. + "status": "str", # Optional. The current status of this garbage + collection. Known values are: "requested", "waiting for write JWTs to + expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", + "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of the + garbage collection. 
+ } } # response body for status code(s): 404 response == { @@ -183710,10 +194825,8 @@ def list_repositories( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_list_repositories_request( + _request = build_registry_get_garbage_collection_request( registry_name=registry_name, - per_page=per_page, - page=page, headers=_headers, params=_params, ) @@ -183773,33 +194886,23 @@ def list_repositories( return cast(JSON, deserialized) # type: ignore @distributed_trace - def list_repositories_v2( - self, - registry_name: str, - *, - per_page: int = 20, - page: int = 1, - page_token: Optional[str] = None, - **kwargs: Any, + def list_garbage_collections( + self, registry_name: str, *, per_page: int = 20, page: int = 1, **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """List All Container Registry Repositories (V2). + """List Garbage Collections. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To list all repositories in your container registry, send a GET - request to ``/v2/registry/$REGISTRY_NAME/repositoriesV2``. + To get information about past garbage collections for a registry, + send a GET request to ``/v2/registry/$REGISTRY_NAME/garbage-collections``. :param registry_name: The name of a container registry. Required. :type registry_name: str :keyword per_page: Number of items returned per page. Default value is 20. :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Ignored when 'page_token' is - provided. Default value is 1. + :keyword page: Which 'page' of paginated results to return. Default value is 1. :paramtype page: int - :keyword page_token: Token to retrieve of the next or previous set of results more quickly than - using 'page'. Default value is None. 
- :paramtype page_token: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -183809,51 +194912,28 @@ def list_repositories_v2( # response body for status code(s): 200 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "links": { - "pages": {} - }, - "repositories": [ + "garbage_collections": [ { - "latest_manifest": { - "blobs": [ - { - "compressed_size_bytes": 0, # - Optional. The compressed size of the blob in bytes. - "digest": "str" # Optional. The - digest of the blob. - } - ], - "compressed_size_bytes": 0, # Optional. The - compressed size of the manifest in bytes. - "digest": "str", # Optional. The manifest digest. - "registry_name": "str", # Optional. The name of the - container registry. - "repository": "str", # Optional. The name of the - repository. - "size_bytes": 0, # Optional. The uncompressed size - of the manifest in bytes (this size is calculated asynchronously so - it may not be immediately available). - "tags": [ - "str" # Optional. All tags associated with - this manifest. - ], - "updated_at": "2020-02-20 00:00:00" # Optional. The - time the manifest was last updated. - }, - "manifest_count": 0, # Optional. The number of manifests in - the repository. - "name": "str", # Optional. The name of the repository. + "blobs_deleted": 0, # Optional. The number of blobs deleted + as a result of this garbage collection. + "created_at": "2020-02-20 00:00:00", # Optional. The time + the garbage collection was created. + "freed_bytes": 0, # Optional. The number of bytes freed as a + result of this garbage collection. "registry_name": "str", # Optional. The name of the container registry. - "tag_count": 0 # Optional. The number of tags in the - repository. + "status": "str", # Optional. The current status of this + garbage collection. 
Known values are: "requested", "waiting for write + JWTs to expire", "scanning manifests", "deleting unreferenced blobs", + "cancelling", "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. The time + the garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of + the garbage collection. } ] } - # response body for status code(s): 400, 404 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -183883,11 +194963,10 @@ def list_repositories_v2( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_list_repositories_v2_request( + _request = build_registry_list_garbage_collections_request( registry_name=registry_name, per_page=per_page, page=page, - page_token=page_token, headers=_headers, params=_params, ) @@ -183902,7 +194981,7 @@ def list_repositories_v2( response = pipeline_response.http_response - if response.status_code not in [200, 400, 404]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -183925,22 +195004,6 @@ def list_repositories_v2( else: deserialized = None - if response.status_code == 400: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", 
response.headers.get("ratelimit-limit") @@ -183962,38 +195025,34 @@ def list_repositories_v2( return cast(JSON, deserialized) # type: ignore - @distributed_trace - def list_repository_tags( + @overload + def update_garbage_collection( self, registry_name: str, - repository_name: str, + garbage_collection_uuid: str, + body: JSON, *, - per_page: int = 20, - page: int = 1, + content_type: str = "application/json", **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """List All Container Registry Repository Tags. + """Update Garbage Collection. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To list all tags in your container registry repository, send a GET - request to ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/tags``. - - Note that if your repository name contains ``/`` characters, it must be - URL-encoded in the request URL. For example, to list tags for - ``registry.digitalocean.com/example/my/repo``\\ , the path would be - ``/v2/registry/example/repositories/my%2Frepo/tags``. + To cancel the currently-active garbage collection for a registry, + send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID`` + and specify one or more of the attributes below. :param registry_name: The name of a container registry. Required. :type registry_name: str - :param repository_name: The name of a container registry repository. If the name contains ``/`` - characters, they must be URL-encoded, e.g. ``%2F``. Required. - :type repository_name: str - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int + :param garbage_collection_uuid: The UUID of a garbage collection run. Required. + :type garbage_collection_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -184001,31 +195060,32 @@ def list_repository_tags( Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "cancel": bool # Optional. A boolean value indicating that the garbage + collection should be cancelled. + } + # response body for status code(s): 200 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "links": { - "pages": {} - }, - "tags": [ - { - "compressed_size_bytes": 0, # Optional. The compressed size - of the tag in bytes. - "manifest_digest": "str", # Optional. The digest of the - manifest associated with the tag. - "registry_name": "str", # Optional. The name of the - container registry. - "repository": "str", # Optional. The name of the repository. - "size_bytes": 0, # Optional. The uncompressed size of the - tag in bytes (this size is calculated asynchronously so it may not be - immediately available). - "tag": "str", # Optional. The name of the tag. - "updated_at": "2020-02-20 00:00:00" # Optional. The time the - tag was last updated. - } - ] + "garbage_collection": { + "blobs_deleted": 0, # Optional. The number of blobs deleted as a + result of this garbage collection. + "created_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was created. + "freed_bytes": 0, # Optional. The number of bytes freed as a result + of this garbage collection. + "registry_name": "str", # Optional. The name of the container + registry. + "status": "str", # Optional. The current status of this garbage + collection. Known values are: "requested", "waiting for write JWTs to + expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", + "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. 
The time the + garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of the + garbage collection. + } } # response body for status code(s): 404 response == { @@ -184039,125 +195099,133 @@ def list_repository_tags( tickets to help identify the issue. } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_registry_list_repository_tags_request( - registry_name=registry_name, - repository_name=repository_name, - per_page=per_page, - page=page, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + @overload + def 
update_garbage_collection( + self, + registry_name: str, + garbage_collection_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """Update Garbage Collection. - if response.content: - deserialized = response.json() - else: - deserialized = None + **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + To cancel the currently-active garbage collection for a registry, + send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID`` + and specify one or more of the attributes below. - if response.content: - deserialized = response.json() - else: - deserialized = None + :param registry_name: The name of a container registry. Required. + :type registry_name: str + :param garbage_collection_uuid: The UUID of a garbage collection run. Required. + :type garbage_collection_uuid: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + Example: + .. code-block:: python - return cast(JSON, deserialized) # type: ignore + # response body for status code(s): 200 + response == { + "garbage_collection": { + "blobs_deleted": 0, # Optional. The number of blobs deleted as a + result of this garbage collection. 
+ "created_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was created. + "freed_bytes": 0, # Optional. The number of bytes freed as a result + of this garbage collection. + "registry_name": "str", # Optional. The name of the container + registry. + "status": "str", # Optional. The current status of this garbage + collection. Known values are: "requested", "waiting for write JWTs to + expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", + "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of the + garbage collection. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ @distributed_trace - def delete_repository_tag( + def update_garbage_collection( self, registry_name: str, - repository_name: str, - repository_tag: str, + garbage_collection_uuid: str, + body: Union[JSON, IO[bytes]], **kwargs: Any, - ) -> Optional[JSON]: + ) -> JSON: # pylint: disable=line-too-long - """Delete Container Registry Repository Tag. + """Update Garbage Collection. **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** - To delete a container repository tag, send a DELETE request to - ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/tags/$TAG``. 
- - Note that if your repository name contains ``/`` characters, it must be - URL-encoded in the request URL. For example, to delete - ``registry.digitalocean.com/example/my/repo:mytag``\\ , the path would be - ``/v2/registry/example/repositories/my%2Frepo/tags/mytag``. - - A successful request will receive a 204 status code with no body in response. - This indicates that the request was processed successfully. + To cancel the currently-active garbage collection for a registry, + send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID`` + and specify one or more of the attributes below. :param registry_name: The name of a container registry. Required. :type registry_name: str - :param repository_name: The name of a container registry repository. If the name contains ``/`` - characters, they must be URL-encoded, e.g. ``%2F``. Required. - :type repository_name: str - :param repository_tag: The name of a container registry repository tag. Required. - :type repository_tag: str - :return: JSON object or None - :rtype: JSON or None + :param garbage_collection_uuid: The UUID of a garbage collection run. Required. + :type garbage_collection_uuid: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = { + "cancel": bool # Optional. A boolean value indicating that the garbage + collection should be cancelled. + } + + # response body for status code(s): 200 + response == { + "garbage_collection": { + "blobs_deleted": 0, # Optional. The number of blobs deleted as a + result of this garbage collection. + "created_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was created. + "freed_bytes": 0, # Optional. The number of bytes freed as a result + of this garbage collection. 
+ "registry_name": "str", # Optional. The name of the container + registry. + "status": "str", # Optional. The current status of this garbage + collection. Known values are: "requested", "waiting for write JWTs to + expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", + "failed", "succeeded", and "cancelled". + "updated_at": "2020-02-20 00:00:00", # Optional. The time the + garbage collection was last updated. + "uuid": "str" # Optional. A string specifying the UUID of the + garbage collection. + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -184183,15 +195251,28 @@ def delete_repository_tag( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_delete_repository_tag_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_registry_update_garbage_collection_request( registry_name=registry_name, - repository_name=repository_name, - repository_tag=repository_tag, + garbage_collection_uuid=garbage_collection_uuid, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -184206,15 +195287,14 @@ def delete_repository_tag( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, 
error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -184225,6 +195305,11 @@ def delete_repository_tag( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -184242,42 +195327,32 @@ def delete_repository_tag( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @distributed_trace - def list_repository_manifests( - self, - registry_name: str, - repository_name: str, - *, - per_page: int = 20, - page: int = 1, - **kwargs: Any, - ) -> JSON: + def get_options(self, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List All Container Registry Repository Manifests. + """List Registry Options (Subscription Tiers and Available Regions). - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + **Note: This endpoint is deprecated and may be removed in a future version. There is no + alternative.****\\ Note: This endpoint is deprecated. Please use the ``/v2/registries`` + endpoint instead.** - To list all manifests in your container registry repository, send a GET - request to ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/digests``. + This endpoint serves to provide additional information as to which option values + are available when creating a container registry. 
- Note that if your repository name contains ``/`` characters, it must be - URL-encoded in the request URL. For example, to list manifests for - ``registry.digitalocean.com/example/my/repo``\\ , the path would be - ``/v2/registry/example/repositories/my%2Frepo/digests``. + There are multiple subscription tiers available for container registry. Each + tier allows a different number of image repositories to be created in your + registry, and has a different amount of storage and transfer included. + + There are multiple regions available for container registry and controls + where your data is stored. + + To list the available options, send a GET request to + ``/v2/registry/options``. - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param repository_name: The name of a container registry repository. If the name contains ``/`` - characters, they must be URL-encoded, e.g. ``%2F``. Required. - :type repository_name: str - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -184287,50 +195362,43 @@ def list_repository_manifests( # response body for status code(s): 200 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "links": { - "pages": {} - }, - "manifests": [ - { - "blobs": [ - { - "compressed_size_bytes": 0, # Optional. The - compressed size of the blob in bytes. - "digest": "str" # Optional. The digest of - the blob. - } - ], - "compressed_size_bytes": 0, # Optional. The compressed size - of the manifest in bytes. - "digest": "str", # Optional. The manifest digest. - "registry_name": "str", # Optional. The name of the - container registry. - "repository": "str", # Optional. The name of the repository. 
- "size_bytes": 0, # Optional. The uncompressed size of the - manifest in bytes (this size is calculated asynchronously so it may not - be immediately available). - "tags": [ - "str" # Optional. All tags associated with this - manifest. - ], - "updated_at": "2020-02-20 00:00:00" # Optional. The time the - manifest was last updated. - } - ] - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. + "options": { + "available_regions": [ + "str" # Optional. + ], + "subscription_tiers": [ + { + "allow_storage_overage": bool, # Optional. A boolean + indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB + used. + "eligibility_reasons": [ + "str" # Optional. If your account is not + eligible to use a certain subscription tier, this will include a + list of reasons that prevent you from using the tier. + ], + "eligible": bool, # Optional. A boolean indicating + whether your account it eligible to use a certain subscription tier. + "included_bandwidth_bytes": 0, # Optional. The + amount of outbound data transfer included in the subscription tier in + bytes. + "included_repositories": 0, # Optional. The number + of repositories included in the subscription tier. ``0`` indicates + that the subscription tier includes unlimited repositories. + "included_storage_bytes": 0, # Optional. The amount + of storage included in the subscription tier in bytes. 
+ "monthly_price_in_cents": 0, # Optional. The monthly + cost of the subscription tier in cents. + "name": "str", # Optional. The name of the + subscription tier. + "slug": "str", # Optional. The slug identifier of + the subscription tier. + "storage_overage_price_in_cents": 0 # Optional. The + price paid in cents per GiB for additional storage beyond what is + included in the subscription plan. + } + ] + } } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -184351,11 +195419,7 @@ def list_repository_manifests( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_list_repository_manifests_request( - registry_name=registry_name, - repository_name=repository_name, - per_page=per_page, - page=page, + _request = build_registry_get_options_request( headers=_headers, params=_params, ) @@ -184370,98 +195434,112 @@ def list_repository_manifests( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [200]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - 
response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore return cast(JSON, deserialized) # type: ignore - @distributed_trace - def delete_repository_manifest( - self, - registry_name: str, - repository_name: str, - manifest_digest: str, - **kwargs: Any, - ) -> Optional[JSON]: - # pylint: disable=line-too-long - """Delete Container Registry Repository Manifest. - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** +class ReservedIPsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. - To delete a container repository manifest by digest, send a DELETE request to - ``/v2/registry/$REGISTRY_NAME/repositories/$REPOSITORY_NAME/digests/$MANIFEST_DIGEST``. + Instead, you should access the following operations through + :class:`~pydo.GeneratedClient`'s + :attr:`reserved_ips` attribute. + """ - Note that if your repository name contains ``/`` characters, it must be - URL-encoded in the request URL. For example, to delete - ``registry.digitalocean.com/example/my/repo@sha256:abcd``\\ , the path would be - ``/v2/registry/example/repositories/my%2Frepo/digests/sha256:abcd``. 
+ def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) - A successful request will receive a 204 status code with no body in response. - This indicates that the request was processed successfully. + @distributed_trace + def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """List All Reserved IPs. - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param repository_name: The name of a container registry repository. If the name contains ``/`` - characters, they must be URL-encoded, e.g. ``%2F``. Required. - :type repository_name: str - :param manifest_digest: The manifest digest of a container registry repository tag. Required. - :type manifest_digest: str - :return: JSON object or None - :rtype: JSON or None + To list all of the reserved IPs available on your account, send a GET request to + ``/v2/reserved_ips``. + + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 404 + # response body for status code(s): 200 response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. 
- "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "links": { + "pages": {} + }, + "reserved_ips": [ + { + "droplet": {}, + "ip": "str", # Optional. The public IP address of the + reserved IP. It also serves as its identifier. + "locked": bool, # Optional. A boolean value indicating + whether or not the reserved IP has pending actions preventing new ones + from being submitted. + "project_id": "str", # Optional. The UUID of the project to + which the reserved IP currently belongs.:code:`
`:code:`
`Requires + ``project:read`` scope. + "region": { + "available": bool, # This is a boolean value that + represents whether new Droplets can be created in this region. + Required. + "features": [ + "str" # This attribute is set to an array + which contains features available in this region. Required. + ], + "name": "str", # The display name of the region. + This will be a full name that is used in the control panel and other + interfaces. Required. + "sizes": [ + "str" # This attribute is set to an array + which contains the identifying slugs for the sizes available in + this region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used + as a unique identifier for each region. Required. + } + } + ] } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -184480,12 +195558,11 @@ def delete_repository_manifest( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_delete_repository_manifest_request( - registry_name=registry_name, - repository_name=repository_name, - manifest_digest=manifest_digest, + _request = build_reserved_ips_list_request( + per_page=per_page, + page=page, headers=_headers, params=_params, ) @@ -184500,87 +195577,52 @@ def delete_repository_manifest( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = 
self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @overload - def run_garbage_collection( - self, - registry_name: str, - body: Optional[JSON] = None, - *, - content_type: str = "application/json", - **kwargs: Any, + def create( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Start Garbage Collection. - - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + """Create a New Reserved IP. - Garbage collection enables users to clear out unreferenced blobs (layer & - manifest data) after deleting one or more manifests from a repository. 
If - there are no unreferenced blobs resulting from the deletion of one or more - manifests, garbage collection is effectively a noop. - `See here for more information - `_ - about how and why you should clean up your container registry periodically. + On creation, a reserved IP must be either assigned to a Droplet or reserved to a region. - To request a garbage collection run on your registry, send a POST request to - ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the - following sequence of events on your registry. + * + To create a new reserved IP assigned to a Droplet, send a POST + request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute. - * Set the registry to read-only mode, meaning no further write-scoped - JWTs will be issued to registry clients. Existing write-scoped JWTs will - continue to work until they expire which can take up to 15 minutes. - * Wait until all existing write-scoped JWTs have expired. - * Scan all registry manifests to determine which blobs are unreferenced. - * Delete all unreferenced blobs from the registry. - * Record the number of blobs deleted and bytes freed, mark the garbage - collection status as ``success``. - * Remove the read-only mode restriction from the registry, meaning write-scoped - JWTs will once again be issued to registry clients. + * + To create a new reserved IP reserved to a region, send a POST request to + ``/v2/reserved_ips`` with the ``region`` attribute. - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param body: Default value is None. + :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -184593,87 +195635,83 @@ def run_garbage_collection( .. code-block:: python # JSON input template you can fill out and use as your body input. - body = { - "type": "str" # Optional. 
Type of the garbage collection to run against this - registry. Known values are: "untagged manifests only", "unreferenced blobs only", - and "untagged manifests and unreferenced blobs". - } + body = {} - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "garbage_collection": { - "blobs_deleted": 0, # Optional. The number of blobs deleted as a - result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was created. - "freed_bytes": 0, # Optional. The number of bytes freed as a result - of this garbage collection. - "registry_name": "str", # Optional. The name of the container - registry. - "status": "str", # Optional. The current status of this garbage - collection. Known values are: "requested", "waiting for write JWTs to - expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", - "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of the - garbage collection. + "links": { + "actions": [ + { + "href": "str", # Optional. A URL that can be used to + access the action. + "id": 0, # Optional. A unique numeric ID that can be + used to identify and reference an action. + "rel": "str" # Optional. A string specifying the + type of the related action. + } + ], + "droplets": [ + { + "href": "str", # Optional. A URL that can be used to + access the action. + "id": 0, # Optional. A unique numeric ID that can be + used to identify and reference an action. + "rel": "str" # Optional. A string specifying the + type of the related action. + } + ] + }, + "reserved_ip": { + "droplet": {}, + "ip": "str", # Optional. The public IP address of the reserved IP. + It also serves as its identifier. + "locked": bool, # Optional. 
A boolean value indicating whether or + not the reserved IP has pending actions preventing new ones from being + submitted. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs.:code:`
`:code:`
`Requires + ``project:read`` scope. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + } } } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } """ @overload - def run_garbage_collection( - self, - registry_name: str, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", - **kwargs: Any, + def create( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Start Garbage Collection. - - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + """Create a New Reserved IP. - Garbage collection enables users to clear out unreferenced blobs (layer & - manifest data) after deleting one or more manifests from a repository. 
If - there are no unreferenced blobs resulting from the deletion of one or more - manifests, garbage collection is effectively a noop. - `See here for more information - `_ - about how and why you should clean up your container registry periodically. + On creation, a reserved IP must be either assigned to a Droplet or reserved to a region. - To request a garbage collection run on your registry, send a POST request to - ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the - following sequence of events on your registry. + * + To create a new reserved IP assigned to a Droplet, send a POST + request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute. - * Set the registry to read-only mode, meaning no further write-scoped - JWTs will be issued to registry clients. Existing write-scoped JWTs will - continue to work until they expire which can take up to 15 minutes. - * Wait until all existing write-scoped JWTs have expired. - * Scan all registry manifests to determine which blobs are unreferenced. - * Delete all unreferenced blobs from the registry. - * Record the number of blobs deleted and bytes freed, mark the garbage - collection status as ``success``. - * Remove the read-only mode restriction from the registry, meaning write-scoped - JWTs will once again be issued to registry clients. + * + To create a new reserved IP reserved to a region, send a POST request to + ``/v2/reserved_ips`` with the ``region`` attribute. - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param body: Default value is None. + :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". @@ -184685,79 +195723,79 @@ def run_garbage_collection( Example: .. 
code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "garbage_collection": { - "blobs_deleted": 0, # Optional. The number of blobs deleted as a - result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was created. - "freed_bytes": 0, # Optional. The number of bytes freed as a result - of this garbage collection. - "registry_name": "str", # Optional. The name of the container - registry. - "status": "str", # Optional. The current status of this garbage - collection. Known values are: "requested", "waiting for write JWTs to - expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", - "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of the - garbage collection. + "links": { + "actions": [ + { + "href": "str", # Optional. A URL that can be used to + access the action. + "id": 0, # Optional. A unique numeric ID that can be + used to identify and reference an action. + "rel": "str" # Optional. A string specifying the + type of the related action. + } + ], + "droplets": [ + { + "href": "str", # Optional. A URL that can be used to + access the action. + "id": 0, # Optional. A unique numeric ID that can be + used to identify and reference an action. + "rel": "str" # Optional. A string specifying the + type of the related action. + } + ] + }, + "reserved_ip": { + "droplet": {}, + "ip": "str", # Optional. The public IP address of the reserved IP. + It also serves as its identifier. + "locked": bool, # Optional. A boolean value indicating whether or + not the reserved IP has pending actions preventing new ones from being + submitted. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs.:code:`
`:code:`
`Requires + ``project:read`` scope. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + } } } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } """ @distributed_trace - def run_garbage_collection( - self, - registry_name: str, - body: Optional[Union[JSON, IO[bytes]]] = None, - **kwargs: Any, - ) -> JSON: + def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Start Garbage Collection. - - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + """Create a New Reserved IP. - Garbage collection enables users to clear out unreferenced blobs (layer & - manifest data) after deleting one or more manifests from a repository. 
If - there are no unreferenced blobs resulting from the deletion of one or more - manifests, garbage collection is effectively a noop. - `See here for more information - `_ - about how and why you should clean up your container registry periodically. + On creation, a reserved IP must be either assigned to a Droplet or reserved to a region. - To request a garbage collection run on your registry, send a POST request to - ``/v2/registry/$REGISTRY_NAME/garbage-collection``. This will initiate the - following sequence of events on your registry. + * + To create a new reserved IP assigned to a Droplet, send a POST + request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute. - * Set the registry to read-only mode, meaning no further write-scoped - JWTs will be issued to registry clients. Existing write-scoped JWTs will - continue to work until they expire which can take up to 15 minutes. - * Wait until all existing write-scoped JWTs have expired. - * Scan all registry manifests to determine which blobs are unreferenced. - * Delete all unreferenced blobs from the registry. - * Record the number of blobs deleted and bytes freed, mark the garbage - collection status as ``success``. - * Remove the read-only mode restriction from the registry, meaning write-scoped - JWTs will once again be issued to registry clients. + * + To create a new reserved IP reserved to a region, send a POST request to + ``/v2/reserved_ips`` with the ``region`` attribute. - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param body: Is either a JSON type or a IO[bytes] type. Default value is None. + :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON @@ -184767,44 +195805,62 @@ def run_garbage_collection( .. code-block:: python # JSON input template you can fill out and use as your body input. - body = { - "type": "str" # Optional. 
Type of the garbage collection to run against this - registry. Known values are: "untagged manifests only", "unreferenced blobs only", - and "untagged manifests and unreferenced blobs". - } + body = {} - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "garbage_collection": { - "blobs_deleted": 0, # Optional. The number of blobs deleted as a - result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was created. - "freed_bytes": 0, # Optional. The number of bytes freed as a result - of this garbage collection. - "registry_name": "str", # Optional. The name of the container - registry. - "status": "str", # Optional. The current status of this garbage - collection. Known values are: "requested", "waiting for write JWTs to - expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", - "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of the - garbage collection. + "links": { + "actions": [ + { + "href": "str", # Optional. A URL that can be used to + access the action. + "id": 0, # Optional. A unique numeric ID that can be + used to identify and reference an action. + "rel": "str" # Optional. A string specifying the + type of the related action. + } + ], + "droplets": [ + { + "href": "str", # Optional. A URL that can be used to + access the action. + "id": 0, # Optional. A unique numeric ID that can be + used to identify and reference an action. + "rel": "str" # Optional. A string specifying the + type of the related action. + } + ] + }, + "reserved_ip": { + "droplet": {}, + "ip": "str", # Optional. The public IP address of the reserved IP. + It also serves as its identifier. + "locked": bool, # Optional. 
A boolean value indicating whether or + not the reserved IP has pending actions preventing new ones from being + submitted. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs.:code:`
`:code:`
`Requires + ``project:read`` scope. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + } } } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
- } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { 404: ResourceNotFoundError, @@ -184833,13 +195889,9 @@ def run_garbage_collection( if isinstance(body, (IOBase, bytes)): _content = body else: - if body is not None: - _json = body - else: - _json = None + _json = body - _request = build_registry_run_garbage_collection_request( - registry_name=registry_name, + _request = build_reserved_ips_create_request( content_type=content_type, json=_json, content=_content, @@ -184857,14 +195909,136 @@ def run_garbage_collection( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [202]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 201: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def get(self, reserved_ip: str, **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Retrieve an Existing Reserved IP. + + To show information about a reserved IP, send a GET request to + ``/v2/reserved_ips/$RESERVED_IP_ADDR``. + + :param reserved_ip: A reserved IP address. Required. + :type reserved_ip: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == { + "reserved_ip": { + "droplet": {}, + "ip": "str", # Optional. The public IP address of the reserved IP. + It also serves as its identifier. + "locked": bool, # Optional. A boolean value indicating whether or + not the reserved IP has pending actions preventing new ones from being + submitted. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs.:code:`
`:code:`
`Requires + ``project:read`` scope. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + } + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_reserved_ips_get_request( + reserved_ip=reserved_ip, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -184902,45 +196076,25 @@ def run_garbage_collection( return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSON: + def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]: # pylint: disable=line-too-long - """Get Active Garbage Collection. + """Delete a Reserved IP. - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + To delete a reserved IP and remove it from your account, send a DELETE request + to ``/v2/reserved_ips/$RESERVED_IP_ADDR``. 
- To get information about the currently-active garbage collection - for a registry, send a GET request to ``/v2/registry/$REGISTRY_NAME/garbage-collection``. + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :return: JSON object - :rtype: JSON + :param reserved_ip: A reserved IP address. Required. + :type reserved_ip: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "garbage_collection": { - "blobs_deleted": 0, # Optional. The number of blobs deleted as a - result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was created. - "freed_bytes": 0, # Optional. The number of bytes freed as a result - of this garbage collection. - "registry_name": "str", # Optional. The name of the container - registry. - "status": "str", # Optional. The current status of this garbage - collection. Known values are: "requested", "waiting for write JWTs to - expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", - "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of the - garbage collection. 
- } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -184969,10 +196123,10 @@ def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSON: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_registry_get_garbage_collection_request( - registry_name=registry_name, + _request = build_reserved_ips_delete_request( + reserved_ip=reserved_ip, headers=_headers, params=_params, ) @@ -184987,14 +196141,15 @@ def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -185005,11 +196160,6 @@ def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSON: "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -185027,28 +196177,40 @@ def get_garbage_collection(self, registry_name: str, **kwargs: Any) -> JSON: deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return 
cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore + + +class ReservedIPsActionsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~pydo.GeneratedClient`'s + :attr:`reserved_ips_actions` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) @distributed_trace - def list_garbage_collections( - self, registry_name: str, *, per_page: int = 20, page: int = 1, **kwargs: Any - ) -> JSON: + def list(self, reserved_ip: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List Garbage Collections. - - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + """List All Actions for a Reserved IP. - To get information about past garbage collections for a registry, - send a GET request to ``/v2/registry/$REGISTRY_NAME/garbage-collections``. + To retrieve all actions that have been executed on a reserved IP, send a GET request to + ``/v2/reserved_ips/$RESERVED_IP/actions``. - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int + :param reserved_ip: A reserved IP address. Required. 
+ :type reserved_ip: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -185058,26 +196220,56 @@ def list_garbage_collections( # response body for status code(s): 200 response == { - "garbage_collections": [ + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "actions": [ { - "blobs_deleted": 0, # Optional. The number of blobs deleted - as a result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time - the garbage collection was created. - "freed_bytes": 0, # Optional. The number of bytes freed as a - result of this garbage collection. - "registry_name": "str", # Optional. The name of the - container registry. - "status": "str", # Optional. The current status of this - garbage collection. Known values are: "requested", "waiting for write - JWTs to expire", "scanning manifests", "deleting unreferenced blobs", - "cancelling", "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time - the garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of - the garbage collection. + "completed_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the action was completed. + "id": 0, # Optional. A unique numeric ID that can be used to + identify and reference an action. + "region": { + "available": bool, # This is a boolean value that + represents whether new Droplets can be created in this region. + Required. + "features": [ + "str" # This attribute is set to an array + which contains features available in this region. Required. + ], + "name": "str", # The display name of the region. + This will be a full name that is used in the control panel and other + interfaces. Required. + "sizes": [ + "str" # This attribute is set to an array + which contains the identifying slugs for the sizes available in + this region. 
sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used + as a unique identifier for each region. Required. + }, + "region_slug": "str", # Optional. A human-readable string + that is used as a unique identifier for each region. + "resource_id": 0, # Optional. A unique identifier for the + resource that the action is associated with. + "resource_type": "str", # Optional. The type of resource + that the action is associated with. + "started_at": "2020-02-20 00:00:00", # Optional. A time + value given in ISO8601 combined date and time format that represents when + the action was initiated. + "status": "in-progress", # Optional. Default value is + "in-progress". The current status of the action. This can be + "in-progress", "completed", or "errored". Known values are: + "in-progress", "completed", and "errored". + "type": "str" # Optional. This is the type of action that + the object represents. For example, this could be "transfer" to represent + the state of an image transfer action. } - ] + ], + "links": { + "pages": {} + } } # response body for status code(s): 404 response == { @@ -185109,10 +196301,8 @@ def list_garbage_collections( cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_list_garbage_collections_request( - registry_name=registry_name, - per_page=per_page, - page=page, + _request = build_reserved_ips_actions_list_request( + reserved_ip=reserved_ip, headers=_headers, params=_params, ) @@ -185172,29 +196362,35 @@ def list_garbage_collections( return cast(JSON, deserialized) # type: ignore @overload - def update_garbage_collection( + def post( self, - registry_name: str, - garbage_collection_uuid: str, - body: JSON, + reserved_ip: str, + body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Update Garbage Collection. + """Initiate a Reserved IP Action. - **Note: This endpoint is deprecated. 
Please use the ``/v2/registries`` endpoint instead.** + To initiate an action on a reserved IP send a POST request to + ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request, + set the ``type`` attribute to on of the supported action types: - To cancel the currently-active garbage collection for a registry, - send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID`` - and specify one or more of the attributes below. + .. list-table:: + :header-rows: 1 - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param garbage_collection_uuid: The UUID of a garbage collection run. Required. - :type garbage_collection_uuid: str - :param body: Required. + * - Action + - Details + * - ``assign`` + - Assigns a reserved IP to a Droplet + * - ``unassign`` + - Unassign a reserved IP from a Droplet. + + :param reserved_ip: A reserved IP address. Required. + :type reserved_ip: str + :param body: The ``type`` attribute set in the request body will specify the action that + will be taken on the reserved IP. Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -185207,30 +196403,51 @@ def update_garbage_collection( .. code-block:: python # JSON input template you can fill out and use as your body input. - body = { - "cancel": bool # Optional. A boolean value indicating that the garbage - collection should be cancelled. - } + body = {} - # response body for status code(s): 200 + # response body for status code(s): 201 response == { - "garbage_collection": { - "blobs_deleted": 0, # Optional. The number of blobs deleted as a - result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was created. - "freed_bytes": 0, # Optional. The number of bytes freed as a result - of this garbage collection. 
- "registry_name": "str", # Optional. The name of the container - registry. - "status": "str", # Optional. The current status of this garbage - collection. Known values are: "requested", "waiting for write JWTs to - expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", - "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of the - garbage collection. + "action": { + "completed_at": "2020-02-20 00:00:00", # Optional. A time value + given in ISO8601 combined date and time format that represents when the + action was completed. + "id": 0, # Optional. A unique numeric ID that can be used to + identify and reference an action. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + }, + "region_slug": "str", # Optional. A human-readable string that is + used as a unique identifier for each region. + "resource_id": 0, # Optional. A unique identifier for the resource + that the action is associated with. + "resource_type": "str", # Optional. The type of resource that the + action is associated with. + "started_at": "2020-02-20 00:00:00", # Optional. 
A time value given + in ISO8601 combined date and time format that represents when the action was + initiated. + "status": "in-progress", # Optional. Default value is "in-progress". + The current status of the action. This can be "in-progress", "completed", or + "errored". Known values are: "in-progress", "completed", and "errored". + "type": "str" # Optional. This is the type of action that the object + represents. For example, this could be "transfer" to represent the state of + an image transfer action. } } # response body for status code(s): 404 @@ -185247,29 +196464,35 @@ def update_garbage_collection( """ @overload - def update_garbage_collection( + def post( self, - registry_name: str, - garbage_collection_uuid: str, - body: IO[bytes], + reserved_ip: str, + body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Update Garbage Collection. + """Initiate a Reserved IP Action. - **Note: This endpoint is deprecated. Please use the ``/v2/registries`` endpoint instead.** + To initiate an action on a reserved IP send a POST request to + ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request, + set the ``type`` attribute to on of the supported action types: - To cancel the currently-active garbage collection for a registry, - send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID`` - and specify one or more of the attributes below. + .. list-table:: + :header-rows: 1 - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param garbage_collection_uuid: The UUID of a garbage collection run. Required. - :type garbage_collection_uuid: str - :param body: Required. + * - Action + - Details + * - ``assign`` + - Assigns a reserved IP to a Droplet + * - ``unassign`` + - Unassign a reserved IP from a Droplet. + + :param reserved_ip: A reserved IP address. Required. 
+ :type reserved_ip: str + :param body: The ``type`` attribute set in the request body will specify the action that + will be taken on the reserved IP. Default value is None. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". @@ -185281,25 +196504,49 @@ def update_garbage_collection( Example: .. code-block:: python - # response body for status code(s): 200 + # response body for status code(s): 201 response == { - "garbage_collection": { - "blobs_deleted": 0, # Optional. The number of blobs deleted as a - result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was created. - "freed_bytes": 0, # Optional. The number of bytes freed as a result - of this garbage collection. - "registry_name": "str", # Optional. The name of the container - registry. - "status": "str", # Optional. The current status of this garbage - collection. Known values are: "requested", "waiting for write JWTs to - expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", - "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of the - garbage collection. + "action": { + "completed_at": "2020-02-20 00:00:00", # Optional. A time value + given in ISO8601 combined date and time format that represents when the + action was completed. + "id": 0, # Optional. A unique numeric ID that can be used to + identify and reference an action. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. 
+ "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + }, + "region_slug": "str", # Optional. A human-readable string that is + used as a unique identifier for each region. + "resource_id": 0, # Optional. A unique identifier for the resource + that the action is associated with. + "resource_type": "str", # Optional. The type of resource that the + action is associated with. + "started_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the action was + initiated. + "status": "in-progress", # Optional. Default value is "in-progress". + The current status of the action. This can be "in-progress", "completed", or + "errored". Known values are: "in-progress", "completed", and "errored". + "type": "str" # Optional. This is the type of action that the object + represents. For example, this could be "transfer" to represent the state of + an image transfer action. } } # response body for status code(s): 404 @@ -185316,27 +196563,34 @@ def update_garbage_collection( """ @distributed_trace - def update_garbage_collection( + def post( self, - registry_name: str, - garbage_collection_uuid: str, - body: Union[JSON, IO[bytes]], + reserved_ip: str, + body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Update Garbage Collection. + """Initiate a Reserved IP Action. - **Note: This endpoint is deprecated. 
Please use the ``/v2/registries`` endpoint instead.** + To initiate an action on a reserved IP send a POST request to + ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request, + set the ``type`` attribute to on of the supported action types: - To cancel the currently-active garbage collection for a registry, - send a PUT request to ``/v2/registry/$REGISTRY_NAME/garbage-collection/$GC_UUID`` - and specify one or more of the attributes below. + .. list-table:: + :header-rows: 1 - :param registry_name: The name of a container registry. Required. - :type registry_name: str - :param garbage_collection_uuid: The UUID of a garbage collection run. Required. - :type garbage_collection_uuid: str - :param body: Is either a JSON type or a IO[bytes] type. Required. + * - Action + - Details + * - ``assign`` + - Assigns a reserved IP to a Droplet + * - ``unassign`` + - Unassign a reserved IP from a Droplet. + + :param reserved_ip: A reserved IP address. Required. + :type reserved_ip: str + :param body: The ``type`` attribute set in the request body will specify the action that + will be taken on the reserved IP. Is either a JSON type or a IO[bytes] type. Default value is + None. :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON @@ -185346,30 +196600,51 @@ def update_garbage_collection( .. code-block:: python # JSON input template you can fill out and use as your body input. - body = { - "cancel": bool # Optional. A boolean value indicating that the garbage - collection should be cancelled. - } + body = {} - # response body for status code(s): 200 + # response body for status code(s): 201 response == { - "garbage_collection": { - "blobs_deleted": 0, # Optional. The number of blobs deleted as a - result of this garbage collection. - "created_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was created. - "freed_bytes": 0, # Optional. The number of bytes freed as a result - of this garbage collection. 
- "registry_name": "str", # Optional. The name of the container - registry. - "status": "str", # Optional. The current status of this garbage - collection. Known values are: "requested", "waiting for write JWTs to - expire", "scanning manifests", "deleting unreferenced blobs", "cancelling", - "failed", "succeeded", and "cancelled". - "updated_at": "2020-02-20 00:00:00", # Optional. The time the - garbage collection was last updated. - "uuid": "str" # Optional. A string specifying the UUID of the - garbage collection. + "action": { + "completed_at": "2020-02-20 00:00:00", # Optional. A time value + given in ISO8601 combined date and time format that represents when the + action was completed. + "id": 0, # Optional. A unique numeric ID that can be used to + identify and reference an action. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + }, + "region_slug": "str", # Optional. A human-readable string that is + used as a unique identifier for each region. + "resource_id": 0, # Optional. A unique identifier for the resource + that the action is associated with. + "resource_type": "str", # Optional. The type of resource that the + action is associated with. + "started_at": "2020-02-20 00:00:00", # Optional. 
A time value given + in ISO8601 combined date and time format that represents when the action was + initiated. + "status": "in-progress", # Optional. Default value is "in-progress". + The current status of the action. This can be "in-progress", "completed", or + "errored". Known values are: "in-progress", "completed", and "errored". + "type": "str" # Optional. This is the type of action that the object + represents. For example, this could be "transfer" to represent the state of + an image transfer action. } } # response body for status code(s): 404 @@ -185411,11 +196686,13 @@ def update_garbage_collection( if isinstance(body, (IOBase, bytes)): _content = body else: - _json = body + if body is not None: + _json = body + else: + _json = None - _request = build_registry_update_garbage_collection_request( - registry_name=registry_name, - garbage_collection_uuid=garbage_collection_uuid, + _request = build_reserved_ips_actions_post_request( + reserved_ip=reserved_ip, content_type=content_type, json=_json, content=_content, @@ -185433,14 +196710,14 @@ def update_garbage_collection( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [201, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 200: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -185478,27 +196755,18 @@ def update_garbage_collection( return cast(JSON, deserialized) # type: ignore @distributed_trace - def get_options(self, **kwargs: Any) -> JSON: + def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List Registry Options (Subscription Tiers and Available Regions). 
- - **Note: This endpoint is deprecated and may be removed in a future version. There is no - alternative.****\\ Note: This endpoint is deprecated. Please use the ``/v2/registries`` - endpoint instead.** - - This endpoint serves to provide additional information as to which option values - are available when creating a container registry. - - There are multiple subscription tiers available for container registry. Each - tier allows a different number of image repositories to be created in your - registry, and has a different amount of storage and transfer included. - - There are multiple regions available for container registry and controls - where your data is stored. + """Retrieve an Existing Reserved IP Action. - To list the available options, send a GET request to - ``/v2/registry/options``. + To retrieve the status of a reserved IP action, send a GET request to + ``/v2/reserved_ips/$RESERVED_IP/actions/$ACTION_ID``. + :param reserved_ip: A reserved IP address. Required. + :type reserved_ip: str + :param action_id: A unique numeric ID that can be used to identify and reference an action. + Required. + :type action_id: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -185508,44 +196776,60 @@ def get_options(self, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "options": { - "available_regions": [ - "str" # Optional. - ], - "subscription_tiers": [ - { - "allow_storage_overage": bool, # Optional. A boolean - indicating whether the subscription tier supports additional storage - above what is included in the base plan at an additional cost per GiB - used. - "eligibility_reasons": [ - "str" # Optional. If your account is not - eligible to use a certain subscription tier, this will include a - list of reasons that prevent you from using the tier. - ], - "eligible": bool, # Optional. A boolean indicating - whether your account it eligible to use a certain subscription tier. 
- "included_bandwidth_bytes": 0, # Optional. The - amount of outbound data transfer included in the subscription tier in - bytes. - "included_repositories": 0, # Optional. The number - of repositories included in the subscription tier. ``0`` indicates - that the subscription tier includes unlimited repositories. - "included_storage_bytes": 0, # Optional. The amount - of storage included in the subscription tier in bytes. - "monthly_price_in_cents": 0, # Optional. The monthly - cost of the subscription tier in cents. - "name": "str", # Optional. The name of the - subscription tier. - "slug": "str", # Optional. The slug identifier of - the subscription tier. - "storage_overage_price_in_cents": 0 # Optional. The - price paid in cents per GiB for additional storage beyond what is - included in the subscription plan. - } - ] + "action": { + "completed_at": "2020-02-20 00:00:00", # Optional. A time value + given in ISO8601 combined date and time format that represents when the + action was completed. + "id": 0, # Optional. A unique numeric ID that can be used to + identify and reference an action. + "project_id": "str", # Optional. The UUID of the project to which + the reserved IP currently belongs. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + }, + "region_slug": "str", # Optional. 
A human-readable string that is + used as a unique identifier for each region. + "resource_id": 0, # Optional. A unique identifier for the resource + that the action is associated with. + "resource_type": "str", # Optional. The type of resource that the + action is associated with. + "started_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the action was + initiated. + "status": "in-progress", # Optional. Default value is "in-progress". + The current status of the action. This can be "in-progress", "completed", or + "errored". Known values are: "in-progress", "completed", and "errored". + "type": "str" # Optional. This is the type of action that the object + represents. For example, this could be "transfer" to represent the state of + an image transfer action. } } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { 404: ResourceNotFoundError, @@ -185565,7 +196849,9 @@ def get_options(self, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_registry_get_options_request( + _request = build_reserved_ips_actions_get_request( + reserved_ip=reserved_ip, + action_id=action_id, headers=_headers, params=_params, ) @@ -185580,27 +196866,44 @@ def get_options(self, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") 
+ ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore @@ -185608,14 +196911,14 @@ def get_options(self, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore -class ReservedIPsOperations: +class ReservedIPv6Operations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~pydo.GeneratedClient`'s - :attr:`reserved_ips` attribute. + :attr:`reserved_ipv6` attribute. """ def __init__(self, *args, **kwargs): @@ -185630,10 +196933,10 @@ def __init__(self, *args, **kwargs): @distributed_trace def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """List All Reserved IPs. + """List All Reserved IPv6s. - To list all of the reserved IPs available on your account, send a GET request to - ``/v2/reserved_ips``. + To list all of the reserved IPv6s available on your account, send a GET request to + ``/v2/reserved_ipv6``. :keyword per_page: Number of items returned per page. Default value is 20. :paramtype per_page: int @@ -185654,36 +196957,15 @@ def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: "links": { "pages": {} }, - "reserved_ips": [ + "reserved_ipv6s": [ { "droplet": {}, "ip": "str", # Optional. The public IP address of the - reserved IP. It also serves as its identifier. - "locked": bool, # Optional. A boolean value indicating - whether or not the reserved IP has pending actions preventing new ones - from being submitted. - "project_id": "str", # Optional. The UUID of the project to - which the reserved IP currently belongs.:code:`
`:code:`
`Requires - ``project:read`` scope. - "region": { - "available": bool, # This is a boolean value that - represents whether new Droplets can be created in this region. - Required. - "features": [ - "str" # This attribute is set to an array - which contains features available in this region. Required. - ], - "name": "str", # The display name of the region. - This will be a full name that is used in the control panel and other - interfaces. Required. - "sizes": [ - "str" # This attribute is set to an array - which contains the identifying slugs for the sizes available in - this region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used - as a unique identifier for each region. Required. - } + reserved IPv6. It also serves as its identifier. + "region_slug": "str", # Optional. The region that the + reserved IPv6 is reserved to. When you query a reserved IPv6,the + region_slug will be returned. + "reserved_at": "2020-02-20 00:00:00" # Optional. } ] } @@ -185706,7 +196988,7 @@ def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_reserved_ips_list_request( + _request = build_reserved_ipv6_list_request( per_page=per_page, page=page, headers=_headers, @@ -185755,18 +197037,13 @@ def create( self, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create a New Reserved IP. - - On creation, a reserved IP must be either assigned to a Droplet or reserved to a region. + """Create a New Reserved IPv6. + On creation, a reserved IPv6 must be reserved to a region. - * - To create a new reserved IP assigned to a Droplet, send a POST - request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute. - * - To create a new reserved IP reserved to a region, send a POST request to - ``/v2/reserved_ips`` with the ``region`` attribute. 
+ * To create a new reserved IPv6 reserved to a region, send a POST request to + ``/v2/reserved_ipv6`` with the ``region_slug`` attribute. :param body: Required. :type body: JSON @@ -185781,60 +197058,20 @@ def create( .. code-block:: python # JSON input template you can fill out and use as your body input. - body = {} + body = { + "region_slug": "str" # The slug identifier for the region the reserved IPv6 + will be reserved to. Required. + } - # response body for status code(s): 202 + # response body for status code(s): 201 response == { - "links": { - "actions": [ - { - "href": "str", # Optional. A URL that can be used to - access the action. - "id": 0, # Optional. A unique numeric ID that can be - used to identify and reference an action. - "rel": "str" # Optional. A string specifying the - type of the related action. - } - ], - "droplets": [ - { - "href": "str", # Optional. A URL that can be used to - access the action. - "id": 0, # Optional. A unique numeric ID that can be - used to identify and reference an action. - "rel": "str" # Optional. A string specifying the - type of the related action. - } - ] - }, - "reserved_ip": { - "droplet": {}, - "ip": "str", # Optional. The public IP address of the reserved IP. + "reserved_ipv6": { + "ip": "str", # Optional. The public IP address of the reserved IPv6. It also serves as its identifier. - "locked": bool, # Optional. A boolean value indicating whether or - not the reserved IP has pending actions preventing new ones from being - submitted. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs.:code:`
`:code:`
`Requires - ``project:read`` scope. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. - "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - } + "region_slug": "str", # Optional. The region that the reserved IPv6 + is reserved to. When you query a reserved IPv6,the region_slug will be + returned. + "reserved_at": "2020-02-20 00:00:00" # Optional. } } """ @@ -185844,18 +197081,13 @@ def create( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Create a New Reserved IP. - - On creation, a reserved IP must be either assigned to a Droplet or reserved to a region. + """Create a New Reserved IPv6. + On creation, a reserved IPv6 must be reserved to a region. - * - To create a new reserved IP assigned to a Droplet, send a POST - request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute. - * - To create a new reserved IP reserved to a region, send a POST request to - ``/v2/reserved_ips`` with the ``region`` attribute. + * To create a new reserved IPv6 reserved to a region, send a POST request to + ``/v2/reserved_ipv6`` with the ``region_slug`` attribute. :param body: Required. :type body: IO[bytes] @@ -185869,58 +197101,15 @@ def create( Example: .. 
code-block:: python - # response body for status code(s): 202 + # response body for status code(s): 201 response == { - "links": { - "actions": [ - { - "href": "str", # Optional. A URL that can be used to - access the action. - "id": 0, # Optional. A unique numeric ID that can be - used to identify and reference an action. - "rel": "str" # Optional. A string specifying the - type of the related action. - } - ], - "droplets": [ - { - "href": "str", # Optional. A URL that can be used to - access the action. - "id": 0, # Optional. A unique numeric ID that can be - used to identify and reference an action. - "rel": "str" # Optional. A string specifying the - type of the related action. - } - ] - }, - "reserved_ip": { - "droplet": {}, - "ip": "str", # Optional. The public IP address of the reserved IP. + "reserved_ipv6": { + "ip": "str", # Optional. The public IP address of the reserved IPv6. It also serves as its identifier. - "locked": bool, # Optional. A boolean value indicating whether or - not the reserved IP has pending actions preventing new ones from being - submitted. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs.:code:`
`:code:`
`Requires - ``project:read`` scope. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. - "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - } + "region_slug": "str", # Optional. The region that the reserved IPv6 + is reserved to. When you query a reserved IPv6,the region_slug will be + returned. + "reserved_at": "2020-02-20 00:00:00" # Optional. } } """ @@ -185928,18 +197117,13 @@ def create( @distributed_trace def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Create a New Reserved IP. - - On creation, a reserved IP must be either assigned to a Droplet or reserved to a region. + """Create a New Reserved IPv6. + On creation, a reserved IPv6 must be reserved to a region. - * - To create a new reserved IP assigned to a Droplet, send a POST - request to ``/v2/reserved_ips`` with the ``droplet_id`` attribute. - * - To create a new reserved IP reserved to a region, send a POST request to - ``/v2/reserved_ips`` with the ``region`` attribute. + * To create a new reserved IPv6 reserved to a region, send a POST request to + ``/v2/reserved_ipv6`` with the ``region_slug`` attribute. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] @@ -185951,60 +197135,20 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: .. 
code-block:: python # JSON input template you can fill out and use as your body input. - body = {} + body = { + "region_slug": "str" # The slug identifier for the region the reserved IPv6 + will be reserved to. Required. + } - # response body for status code(s): 202 + # response body for status code(s): 201 response == { - "links": { - "actions": [ - { - "href": "str", # Optional. A URL that can be used to - access the action. - "id": 0, # Optional. A unique numeric ID that can be - used to identify and reference an action. - "rel": "str" # Optional. A string specifying the - type of the related action. - } - ], - "droplets": [ - { - "href": "str", # Optional. A URL that can be used to - access the action. - "id": 0, # Optional. A unique numeric ID that can be - used to identify and reference an action. - "rel": "str" # Optional. A string specifying the - type of the related action. - } - ] - }, - "reserved_ip": { - "droplet": {}, - "ip": "str", # Optional. The public IP address of the reserved IP. + "reserved_ipv6": { + "ip": "str", # Optional. The public IP address of the reserved IPv6. It also serves as its identifier. - "locked": bool, # Optional. A boolean value indicating whether or - not the reserved IP has pending actions preventing new ones from being - submitted. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs.:code:`
`:code:`
`Requires - ``project:read`` scope. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. - "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - } + "region_slug": "str", # Optional. The region that the reserved IPv6 + is reserved to. When you query a reserved IPv6,the region_slug will be + returned. + "reserved_at": "2020-02-20 00:00:00" # Optional. } } """ @@ -186037,7 +197181,7 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: else: _json = body - _request = build_reserved_ips_create_request( + _request = build_reserved_ipv6_create_request( content_type=content_type, json=_json, content=_content, @@ -186055,7 +197199,7 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [202]: + if response.status_code not in [201]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -186083,15 +197227,15 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @distributed_trace - def get(self, reserved_ip: str, **kwargs: Any) -> JSON: + def get(self, reserved_ipv6: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Retrieve an Existing Reserved IP. 
+ """Retrieve an Existing Reserved IPv6. - To show information about a reserved IP, send a GET request to - ``/v2/reserved_ips/$RESERVED_IP_ADDR``. + To show information about a reserved IPv6, send a GET request to + ``/v2/reserved_ipv6/$RESERVED_IPV6``. - :param reserved_ip: A reserved IP address. Required. - :type reserved_ip: str + :param reserved_ipv6: A reserved IPv6 address. Required. + :type reserved_ipv6: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -186101,34 +197245,15 @@ def get(self, reserved_ip: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "reserved_ip": { + "reserved_ipv6": { "droplet": {}, - "ip": "str", # Optional. The public IP address of the reserved IP. + "ip": "str", # Optional. The public IP address of the reserved IPv6. It also serves as its identifier. - "locked": bool, # Optional. A boolean value indicating whether or - not the reserved IP has pending actions preventing new ones from being - submitted. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs.:code:`
`:code:`
`Requires - ``project:read`` scope. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. - "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - } + "region_slug": "str", # Optional. The region that the reserved IPv6 + is reserved to. When you query a reserved IPv6,the region_slug will be + returned. + "reserved_at": "2020-02-20 00:00:00" # Optional. The date and time + when the reserved IPv6 was reserved. } } # response body for status code(s): 404 @@ -186161,8 +197286,8 @@ def get(self, reserved_ip: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_reserved_ips_get_request( - reserved_ip=reserved_ip, + _request = build_reserved_ipv6_get_request( + reserved_ipv6=reserved_ipv6, headers=_headers, params=_params, ) @@ -186222,18 +197347,18 @@ def get(self, reserved_ip: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @distributed_trace - def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]: + def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]: # pylint: disable=line-too-long - """Delete a Reserved IP. + """Delete a Reserved IPv6. To delete a reserved IP and remove it from your account, send a DELETE request - to ``/v2/reserved_ips/$RESERVED_IP_ADDR``. + to ``/v2/reserved_ipv6/$RESERVED_IPV6``. A successful request will receive a 204 status code with no body in response. 
This indicates that the request was processed successfully. - :param reserved_ip: A reserved IP address. Required. - :type reserved_ip: str + :param reserved_ipv6: A reserved IPv6 address. Required. + :type reserved_ipv6: str :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -186241,7 +197366,7 @@ def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]: Example: .. code-block:: python - # response body for status code(s): 404 + # response body for status code(s): 404, 422 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -186271,8 +197396,8 @@ def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]: cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_reserved_ips_delete_request( - reserved_ip=reserved_ip, + _request = build_reserved_ipv6_delete_request( + reserved_ipv6=reserved_ipv6, headers=_headers, params=_params, ) @@ -186287,7 +197412,7 @@ def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]: response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [204, 404, 422]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -186322,20 +197447,36 @@ def delete(self, reserved_ip: str, **kwargs: Any) -> Optional[JSON]: else: deserialized = None + if response.status_code == 422: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = 
response.json() + else: + deserialized = None + if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized # type: ignore -class ReservedIPsActionsOperations: +class ReservedIPv6ActionsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~pydo.GeneratedClient`'s - :attr:`reserved_ips_actions` attribute. + :attr:`reserved_ipv6_actions` attribute. """ def __init__(self, *args, **kwargs): @@ -186347,16 +197488,40 @@ def __init__(self, *args, **kwargs): input_args.pop(0) if input_args else kwargs.pop("deserializer") ) - @distributed_trace - def list(self, reserved_ip: str, **kwargs: Any) -> JSON: + @overload + def post( + self, + reserved_ipv6: str, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> JSON: # pylint: disable=line-too-long - """List All Actions for a Reserved IP. + """Initiate a Reserved IPv6 Action. - To retrieve all actions that have been executed on a reserved IP, send a GET request to - ``/v2/reserved_ips/$RESERVED_IP/actions``. + To initiate an action on a reserved IPv6 send a POST request to + ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request, + set the ``type`` attribute to on of the supported action types: - :param reserved_ip: A reserved IP address. Required. - :type reserved_ip: str + .. list-table:: + :header-rows: 1 + + * - Action + - Details + * - ``assign`` + - Assigns a reserved IPv6 to a Droplet + * - ``unassign`` + - Unassign a reserved IPv6 from a Droplet. + + :param reserved_ipv6: A reserved IPv6 address. Required. + :type reserved_ipv6: str + :param body: The ``type`` attribute set in the request body will specify the action that + will be taken on the reserved IPv6. Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
+ Default value is "application/json". + :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -186364,57 +197529,50 @@ def list(self, reserved_ip: str, **kwargs: Any) -> JSON: Example: .. code-block:: python - # response body for status code(s): 200 + # JSON input template you can fill out and use as your body input. + body = {} + + # response body for status code(s): 201 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "actions": [ - { - "completed_at": "2020-02-20 00:00:00", # Optional. A time - value given in ISO8601 combined date and time format that represents when - the action was completed. - "id": 0, # Optional. A unique numeric ID that can be used to - identify and reference an action. - "region": { - "available": bool, # This is a boolean value that - represents whether new Droplets can be created in this region. - Required. - "features": [ - "str" # This attribute is set to an array - which contains features available in this region. Required. - ], - "name": "str", # The display name of the region. - This will be a full name that is used in the control panel and other - interfaces. Required. - "sizes": [ - "str" # This attribute is set to an array - which contains the identifying slugs for the sizes available in - this region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used - as a unique identifier for each region. Required. - }, - "region_slug": "str", # Optional. A human-readable string - that is used as a unique identifier for each region. - "resource_id": 0, # Optional. A unique identifier for the - resource that the action is associated with. - "resource_type": "str", # Optional. The type of resource - that the action is associated with. - "started_at": "2020-02-20 00:00:00", # Optional. 
A time - value given in ISO8601 combined date and time format that represents when - the action was initiated. - "status": "in-progress", # Optional. Default value is - "in-progress". The current status of the action. This can be - "in-progress", "completed", or "errored". Known values are: - "in-progress", "completed", and "errored". - "type": "str" # Optional. This is the type of action that - the object represents. For example, this could be "transfer" to represent - the state of an image transfer action. - } - ], - "links": { - "pages": {} + "action": { + "completed_at": "2020-02-20 00:00:00", # Optional. A time value + given in ISO8601 combined date and time format that represents when the + action was completed. + "id": 0, # Optional. A unique numeric ID that can be used to + identify and reference an action. + "region": { + "available": bool, # This is a boolean value that represents + whether new Droplets can be created in this region. Required. + "features": [ + "str" # This attribute is set to an array which + contains features available in this region. Required. + ], + "name": "str", # The display name of the region. This will + be a full name that is used in the control panel and other interfaces. + Required. + "sizes": [ + "str" # This attribute is set to an array which + contains the identifying slugs for the sizes available in this + region. sizes:read is required to view. Required. + ], + "slug": "str" # A human-readable string that is used as a + unique identifier for each region. Required. + }, + "region_slug": "str", # Optional. A human-readable string that is + used as a unique identifier for each region. + "resource_id": 0, # Optional. A unique identifier for the resource + that the action is associated with. + "resource_type": "str", # Optional. The type of resource that the + action is associated with. + "started_at": "2020-02-20 00:00:00", # Optional. 
A time value given + in ISO8601 combined date and time format that represents when the action was + initiated. + "status": "in-progress", # Optional. Default value is "in-progress". + The current status of the action. This can be "in-progress", "completed", or + "errored". Known values are: "in-progress", "completed", and "errored". + "type": "str" # Optional. This is the type of action that the object + represents. For example, this could be "transfer" to represent the state of + an image transfer action. } } # response body for status code(s): 404 @@ -186429,98 +197587,21 @@ def list(self, reserved_ip: str, **kwargs: Any) -> JSON: tickets to help identify the issue. } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - 401: cast( - Type[HttpResponseError], - lambda response: ClientAuthenticationError(response=response), - ), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[JSON] = kwargs.pop("cls", None) - - _request = build_reserved_ips_actions_list_request( - reserved_ip=reserved_ip, - headers=_headers, - params=_params, - ) - _request.url = self._client.format_url(_request.url) - - _stream = False - pipeline_response: PipelineResponse = ( - self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", 
response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - - return cast(JSON, deserialized) # type: ignore @overload def post( self, - reserved_ip: str, - body: Optional[JSON] = None, + reserved_ipv6: str, + body: Optional[IO[bytes]] = None, *, content_type: str = "application/json", **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Initiate a Reserved IP Action. + """Initiate a Reserved IPv6 Action. - To initiate an action on a reserved IP send a POST request to - ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request, + To initiate an action on a reserved IPv6 send a POST request to + ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request, set the ``type`` attribute to on of the supported action types: .. list-table:: @@ -186529,16 +197610,16 @@ def post( * - Action - Details * - ``assign`` - - Assigns a reserved IP to a Droplet + - Assigns a reserved IPv6 to a Droplet * - ``unassign`` - - Unassign a reserved IP from a Droplet. + - Unassign a reserved IPv6 from a Droplet. - :param reserved_ip: A reserved IP address. Required. 
- :type reserved_ip: str + :param reserved_ipv6: A reserved IPv6 address. Required. + :type reserved_ipv6: str :param body: The ``type`` attribute set in the request body will specify the action that - will be taken on the reserved IP. Default value is None. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + will be taken on the reserved IPv6. Default value is None. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: JSON object @@ -186548,9 +197629,6 @@ def post( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = {} - # response body for status code(s): 201 response == { "action": { @@ -186559,8 +197637,6 @@ def post( action was completed. "id": 0, # Optional. A unique numeric ID that can be used to identify and reference an action. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs. "region": { "available": bool, # This is a boolean value that represents whether new Droplets can be created in this region. Required. @@ -186609,20 +197685,18 @@ def post( } """ - @overload + @distributed_trace def post( self, - reserved_ip: str, - body: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", + reserved_ipv6: str, + body: Optional[Union[JSON, IO[bytes]]] = None, **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Initiate a Reserved IP Action. + """Initiate a Reserved IPv6 Action. - To initiate an action on a reserved IP send a POST request to - ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request, + To initiate an action on a reserved IPv6 send a POST request to + ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. 
In the JSON body to the request, set the ``type`` attribute to on of the supported action types: .. list-table:: @@ -186631,18 +197705,16 @@ def post( * - Action - Details * - ``assign`` - - Assigns a reserved IP to a Droplet + - Assigns a reserved IPv6 to a Droplet * - ``unassign`` - - Unassign a reserved IP from a Droplet. + - Unassign a reserved IPv6 from a Droplet. - :param reserved_ip: A reserved IP address. Required. - :type reserved_ip: str + :param reserved_ipv6: A reserved IPv6 address. Required. + :type reserved_ipv6: str :param body: The ``type`` attribute set in the request body will specify the action that - will be taken on the reserved IP. Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str + will be taken on the reserved IPv6. Is either a JSON type or a IO[bytes] type. Default value + is None. + :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -186650,6 +197722,9 @@ def post( Example: .. code-block:: python + # JSON input template you can fill out and use as your body input. + body = {} + # response body for status code(s): 201 response == { "action": { @@ -186658,8 +197733,6 @@ def post( action was completed. "id": 0, # Optional. A unique numeric ID that can be used to identify and reference an action. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs. "region": { "available": bool, # This is a boolean value that represents whether new Droplets can be created in this region. Required. @@ -186695,7 +197768,215 @@ def post( an image transfer action. } } - # response body for status code(s): 404 + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. 
For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + _request = build_reserved_ipv6_actions_post_request( + reserved_ipv6=reserved_ipv6, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [201, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise 
HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 201: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + +class ByoipPrefixesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~pydo.GeneratedClient`'s + :attr:`byoip_prefixes` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) + + @overload + def create( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a BYOIP Prefix. 
+ + To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``. + + A successful request will initiate the process of bringing your BYOIP Prefix into your account. + The response will include the details of the created prefix, including its UUID and status. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "prefix": "str", # The IP prefix in CIDR notation to bring. Required. + "region": "str", # The region where the prefix will be created. Required. + "signature": "str" # The signature hash for the prefix creation request. + Required. + } + + # response body for status code(s): 202 + response == { + "region": "str", # Optional. The region where the prefix is created. + "status": "str", # Optional. The status of the BYOIP prefix. + "uuid": "str" # Optional. The unique identifier for the BYOIP prefix. + } + # response body for status code(s): 422 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def create( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Create a BYOIP Prefix. 
+ + To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``. + + A successful request will initiate the process of bringing your BYOIP Prefix into your account. + The response will include the details of the created prefix, including its UUID and status. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 202 + response == { + "region": "str", # Optional. The region where the prefix is created. + "status": "str", # Optional. The status of the BYOIP prefix. + "uuid": "str" # Optional. The unique identifier for the BYOIP prefix. + } + # response body for status code(s): 422 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -186709,34 +197990,16 @@ def post( """ @distributed_trace - def post( - self, - reserved_ip: str, - body: Optional[Union[JSON, IO[bytes]]] = None, - **kwargs: Any, - ) -> JSON: + def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Initiate a Reserved IP Action. - - To initiate an action on a reserved IP send a POST request to - ``/v2/reserved_ips/$RESERVED_IP/actions``. In the JSON body to the request, - set the ``type`` attribute to on of the supported action types: + """Create a BYOIP Prefix. - .. list-table:: - :header-rows: 1 + To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``. - * - Action - - Details - * - ``assign`` - - Assigns a reserved IP to a Droplet - * - ``unassign`` - - Unassign a reserved IP from a Droplet. 
+ A successful request will initiate the process of bringing your BYOIP Prefix into your account. + The response will include the details of the created prefix, including its UUID and status. - :param reserved_ip: A reserved IP address. Required. - :type reserved_ip: str - :param body: The ``type`` attribute set in the request body will specify the action that - will be taken on the reserved IP. Is either a JSON type or a IO[bytes] type. Default value is - None. + :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object :rtype: JSON @@ -186746,54 +198009,20 @@ def post( .. code-block:: python # JSON input template you can fill out and use as your body input. - body = {} + body = { + "prefix": "str", # The IP prefix in CIDR notation to bring. Required. + "region": "str", # The region where the prefix will be created. Required. + "signature": "str" # The signature hash for the prefix creation request. + Required. + } - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "action": { - "completed_at": "2020-02-20 00:00:00", # Optional. A time value - given in ISO8601 combined date and time format that represents when the - action was completed. - "id": 0, # Optional. A unique numeric ID that can be used to - identify and reference an action. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. 
- "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - }, - "region_slug": "str", # Optional. A human-readable string that is - used as a unique identifier for each region. - "resource_id": 0, # Optional. A unique identifier for the resource - that the action is associated with. - "resource_type": "str", # Optional. The type of resource that the - action is associated with. - "started_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the action was - initiated. - "status": "in-progress", # Optional. Default value is "in-progress". - The current status of the action. This can be "in-progress", "completed", or - "errored". Known values are: "in-progress", "completed", and "errored". - "type": "str" # Optional. This is the type of action that the object - represents. For example, this could be "transfer" to represent the state of - an image transfer action. - } + "region": "str", # Optional. The region where the prefix is created. + "status": "str", # Optional. The status of the BYOIP prefix. + "uuid": "str" # Optional. The unique identifier for the BYOIP prefix. } - # response body for status code(s): 404 + # response body for status code(s): 422 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -186832,13 +198061,9 @@ def post( if isinstance(body, (IOBase, bytes)): _content = body else: - if body is not None: - _json = body - else: - _json = None + _json = body - _request = build_reserved_ips_actions_post_request( - reserved_ip=reserved_ip, + _request = build_byoip_prefixes_create_request( content_type=content_type, json=_json, content=_content, @@ -186856,14 +198081,14 @@ def post( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [202, 422]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 201: + if response.status_code == 202: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -186879,7 +198104,7 @@ def post( else: deserialized = None - if response.status_code == 404: + if response.status_code == 422: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -186901,18 +198126,135 @@ def post( return cast(JSON, deserialized) # type: ignore @distributed_trace - def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON: + def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: + """List BYOIP Prefixes. + + To list all BYOIP prefixes, send a GET request to ``/v2/byoip_prefixes``. + A successful response will return a list of all BYOIP prefixes associated with the account. + + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. 
+ :paramtype page: int + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "byoip_prefixes": [ + { + "advertised": bool, # Optional. Whether the BYOIP prefix is + being advertised. + "failure_reason": "str", # Optional. Reason for failure, if + applicable. + "locked": bool, # Optional. Whether the BYOIP prefix is + locked. + "name": "str", # Optional. Name of the BYOIP prefix. + "prefix": "str", # Optional. The IP prefix in CIDR notation. + "project_id": "str", # Optional. The ID of the project + associated with the BYOIP prefix. + "region": "str", # Optional. Region where the BYOIP prefix + is located. + "status": "str", # Optional. Status of the BYOIP prefix. + "uuid": "str", # Optional. Unique identifier for the BYOIP + prefix. + "validations": [ + { + "name": "str", # Optional. Name of the + validation. + "note": "str", # Optional. Additional notes + or details about the validation. + "status": "str" # Optional. Status of the + validation. 
+ } + ] + } + ], + "links": { + "pages": {} + } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_byoip_prefixes_list_request( + per_page=per_page, + page=page, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Retrieve an Existing 
Reserved IP Action. + """Get a BYOIP Prefix. - To retrieve the status of a reserved IP action, send a GET request to - ``/v2/reserved_ips/$RESERVED_IP/actions/$ACTION_ID``. + To get a BYOIP prefix, send a GET request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. - :param reserved_ip: A reserved IP address. Required. - :type reserved_ip: str - :param action_id: A unique numeric ID that can be used to identify and reference an action. - Required. - :type action_id: int + A successful response will return the details of the specified BYOIP prefix. + + :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required. + :type byoip_prefix_uuid: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -186922,50 +198264,32 @@ def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "action": { - "completed_at": "2020-02-20 00:00:00", # Optional. A time value - given in ISO8601 combined date and time format that represents when the - action was completed. - "id": 0, # Optional. A unique numeric ID that can be used to - identify and reference an action. - "project_id": "str", # Optional. The UUID of the project to which - the reserved IP currently belongs. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. - "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. 
- }, - "region_slug": "str", # Optional. A human-readable string that is - used as a unique identifier for each region. - "resource_id": 0, # Optional. A unique identifier for the resource - that the action is associated with. - "resource_type": "str", # Optional. The type of resource that the - action is associated with. - "started_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the action was - initiated. - "status": "in-progress", # Optional. Default value is "in-progress". - The current status of the action. This can be "in-progress", "completed", or - "errored". Known values are: "in-progress", "completed", and "errored". - "type": "str" # Optional. This is the type of action that the object - represents. For example, this could be "transfer" to represent the state of - an image transfer action. + "byoip_prefix": { + "advertised": bool, # Optional. Whether the BYOIP prefix is being + advertised. + "failure_reason": "str", # Optional. Reason for failure, if + applicable. + "locked": bool, # Optional. Whether the BYOIP prefix is locked. + "name": "str", # Optional. Name of the BYOIP prefix. + "prefix": "str", # Optional. The IP prefix in CIDR notation. + "project_id": "str", # Optional. The ID of the project associated + with the BYOIP prefix. + "region": "str", # Optional. Region where the BYOIP prefix is + located. + "status": "str", # Optional. Status of the BYOIP prefix. + "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. + "validations": [ + { + "name": "str", # Optional. Name of the validation. + "note": "str", # Optional. Additional notes or + details about the validation. + "status": "str" # Optional. Status of the + validation. + } + ] } } - # response body for status code(s): 404 + # response body for status code(s): 404, 422 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -186995,9 +198319,8 @@ def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_reserved_ips_actions_get_request( - reserved_ip=reserved_ip, - action_id=action_id, + _request = build_byoip_prefixes_get_request( + byoip_prefix_uuid=byoip_prefix_uuid, headers=_headers, params=_params, ) @@ -187012,7 +198335,7 @@ def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [200, 404, 422]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -187051,69 +198374,57 @@ def get(self, reserved_ip: str, action_id: int, **kwargs: Any) -> JSON: else: deserialized = None + if response.status_code == 422: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore return cast(JSON, deserialized) # type: ignore - -class ReservedIPv6Operations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~pydo.GeneratedClient`'s - :attr:`reserved_ipv6` attribute. 
- """ - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = ( - input_args.pop(0) if input_args else kwargs.pop("deserializer") - ) - @distributed_trace - def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: + def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]: # pylint: disable=line-too-long - """List All Reserved IPv6s. + """Delete a BYOIP Prefix. - To list all of the reserved IPv6s available on your account, send a GET request to - ``/v2/reserved_ipv6``. + To delete a BYOIP prefix and remove it from your account, send a DELETE request + to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int - :return: JSON object - :rtype: JSON + A successful request will receive a 202 status code with no body in response. + This indicates that the request was accepted and the prefix is being deleted. + + :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required. + :type byoip_prefix_uuid: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 + # response body for status code(s): 404, 422 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "links": { - "pages": {} - }, - "reserved_ipv6s": [ - { - "droplet": {}, - "ip": "str", # Optional. The public IP address of the - reserved IPv6. It also serves as its identifier. - "region_slug": "str", # Optional. 
The region that the - reserved IPv6 is reserved to. When you query a reserved IPv6,the - region_slug will be returned. - "reserved_at": "2020-02-20 00:00:00" # Optional. - } - ] + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -187132,11 +198443,10 @@ def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_reserved_ipv6_list_request( - per_page=per_page, - page=page, + _request = build_byoip_prefixes_delete_request( + byoip_prefix_uuid=byoip_prefix_uuid, headers=_headers, params=_params, ) @@ -187151,46 +198461,81 @@ def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [202, 404, 422]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - 
response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + if response.status_code == 202: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 422: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @overload - def create( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + def patch( + self, + byoip_prefix_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, ) 
-> JSON: # pylint: disable=line-too-long - """Create a New Reserved IPv6. - - On creation, a reserved IPv6 must be reserved to a region. + """Update a BYOIP Prefix. + To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. - * To create a new reserved IPv6 reserved to a region, send a POST request to - ``/v2/reserved_ipv6`` with the ``region_slug`` attribute. + Currently, you can update the advertisement status of the prefix. + The response will include the updated details of the prefix. + :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required. + :type byoip_prefix_uuid: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -187205,36 +198550,68 @@ def create( # JSON input template you can fill out and use as your body input. body = { - "region_slug": "str" # The slug identifier for the region the reserved IPv6 - will be reserved to. Required. + "advertise": bool # Optional. Whether the BYOIP prefix should be advertised. } - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "reserved_ipv6": { - "ip": "str", # Optional. The public IP address of the reserved IPv6. - It also serves as its identifier. - "region_slug": "str", # Optional. The region that the reserved IPv6 - is reserved to. When you query a reserved IPv6,the region_slug will be - returned. - "reserved_at": "2020-02-20 00:00:00" # Optional. + "byoip_prefix": { + "advertised": bool, # Optional. Whether the BYOIP prefix is being + advertised. + "failure_reason": "str", # Optional. Reason for failure, if + applicable. + "locked": bool, # Optional. Whether the BYOIP prefix is locked. + "name": "str", # Optional. Name of the BYOIP prefix. + "prefix": "str", # Optional. The IP prefix in CIDR notation. + "project_id": "str", # Optional. The ID of the project associated + with the BYOIP prefix. + "region": "str", # Optional. 
Region where the BYOIP prefix is + located. + "status": "str", # Optional. Status of the BYOIP prefix. + "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. + "validations": [ + { + "name": "str", # Optional. Name of the validation. + "note": "str", # Optional. Additional notes or + details about the validation. + "status": "str" # Optional. Status of the + validation. + } + ] } } + # response body for status code(s): 404, 422 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } """ @overload - def create( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + def patch( + self, + byoip_prefix_uuid: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Create a New Reserved IPv6. - - On creation, a reserved IPv6 must be reserved to a region. + """Update a BYOIP Prefix. + To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. - * To create a new reserved IPv6 reserved to a region, send a POST request to - ``/v2/reserved_ipv6`` with the ``region_slug`` attribute. + Currently, you can update the advertisement status of the prefix. + The response will include the updated details of the prefix. + :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required. + :type byoip_prefix_uuid: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. @@ -187247,30 +198624,60 @@ def create( Example: .. code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "reserved_ipv6": { - "ip": "str", # Optional. The public IP address of the reserved IPv6. - It also serves as its identifier. - "region_slug": "str", # Optional. The region that the reserved IPv6 - is reserved to. When you query a reserved IPv6,the region_slug will be - returned. - "reserved_at": "2020-02-20 00:00:00" # Optional. + "byoip_prefix": { + "advertised": bool, # Optional. Whether the BYOIP prefix is being + advertised. + "failure_reason": "str", # Optional. Reason for failure, if + applicable. + "locked": bool, # Optional. Whether the BYOIP prefix is locked. + "name": "str", # Optional. Name of the BYOIP prefix. + "prefix": "str", # Optional. The IP prefix in CIDR notation. + "project_id": "str", # Optional. The ID of the project associated + with the BYOIP prefix. + "region": "str", # Optional. Region where the BYOIP prefix is + located. + "status": "str", # Optional. Status of the BYOIP prefix. + "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. + "validations": [ + { + "name": "str", # Optional. Name of the validation. + "note": "str", # Optional. Additional notes or + details about the validation. + "status": "str" # Optional. Status of the + validation. + } + ] } } + # response body for status code(s): 404, 422 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } """ @distributed_trace - def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: + def patch( + self, byoip_prefix_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Create a New Reserved IPv6. - - On creation, a reserved IPv6 must be reserved to a region. + """Update a BYOIP Prefix. + To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. - * To create a new reserved IPv6 reserved to a region, send a POST request to - ``/v2/reserved_ipv6`` with the ``region_slug`` attribute. + Currently, you can update the advertisement status of the prefix. + The response will include the updated details of the prefix. + :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required. + :type byoip_prefix_uuid: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object @@ -187282,21 +198689,47 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # JSON input template you can fill out and use as your body input. body = { - "region_slug": "str" # The slug identifier for the region the reserved IPv6 - will be reserved to. Required. + "advertise": bool # Optional. Whether the BYOIP prefix should be advertised. } - # response body for status code(s): 201 + # response body for status code(s): 202 response == { - "reserved_ipv6": { - "ip": "str", # Optional. The public IP address of the reserved IPv6. - It also serves as its identifier. - "region_slug": "str", # Optional. The region that the reserved IPv6 - is reserved to. When you query a reserved IPv6,the region_slug will be - returned. - "reserved_at": "2020-02-20 00:00:00" # Optional. + "byoip_prefix": { + "advertised": bool, # Optional. 
Whether the BYOIP prefix is being + advertised. + "failure_reason": "str", # Optional. Reason for failure, if + applicable. + "locked": bool, # Optional. Whether the BYOIP prefix is locked. + "name": "str", # Optional. Name of the BYOIP prefix. + "prefix": "str", # Optional. The IP prefix in CIDR notation. + "project_id": "str", # Optional. The ID of the project associated + with the BYOIP prefix. + "region": "str", # Optional. Region where the BYOIP prefix is + located. + "status": "str", # Optional. Status of the BYOIP prefix. + "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. + "validations": [ + { + "name": "str", # Optional. Name of the validation. + "note": "str", # Optional. Additional notes or + details about the validation. + "status": "str" # Optional. Status of the + validation. + } + ] } } + # response body for status code(s): 404, 422 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { 404: ResourceNotFoundError, @@ -187327,7 +198760,8 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: else: _json = body - _request = build_reserved_ipv6_create_request( + _request = build_byoip_prefixes_patch_request( + byoip_prefix_uuid=byoip_prefix_uuid, content_type=content_type, json=_json, content=_content, @@ -187345,27 +198779,60 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [201]: + if response.status_code not in [202, 404, 422]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + if response.status_code == 202: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", 
response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 422: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore @@ -187373,15 +198840,29 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @distributed_trace - def get(self, reserved_ipv6: str, **kwargs: Any) -> JSON: + def list_resources( + self, + byoip_prefix_uuid: str, + *, + per_page: int = 20, + page: int = 1, + **kwargs: Any, + ) -> JSON: # pylint: disable=line-too-long - """Retrieve an Existing Reserved IPv6. + """List BYOIP Prefix Resources. - To show information about a reserved IPv6, send a GET request to - ``/v2/reserved_ipv6/$RESERVED_IPV6``. + To list resources associated with BYOIP prefixes, send a GET request to + ``/v2/byoip_prefixes/{byoip_prefix_uuid}/ips``. - :param reserved_ipv6: A reserved IPv6 address. Required. - :type reserved_ipv6: str + A successful response will return a list of resources associated with the specified BYOIP + prefix. + + :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required. + :type byoip_prefix_uuid: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. 
+ :paramtype page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -187391,15 +198872,23 @@ def get(self, reserved_ipv6: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "reserved_ipv6": { - "droplet": {}, - "ip": "str", # Optional. The public IP address of the reserved IPv6. - It also serves as its identifier. - "region_slug": "str", # Optional. The region that the reserved IPv6 - is reserved to. When you query a reserved IPv6,the region_slug will be - returned. - "reserved_at": "2020-02-20 00:00:00" # Optional. The date and time - when the reserved IPv6 was reserved. + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "ips": [ + { + "assigned_at": "2020-02-20 00:00:00", # Optional. Time when + the allocation was assigned. + "byoip": "str", # Optional. The BYOIP prefix UUID. + "id": 0, # Optional. Unique identifier for the allocation. + "region": "str", # Optional. Region where the allocation is + made. + "resource": "str" # Optional. The resource associated with + the allocation. + } + ], + "links": { + "pages": {} } } # response body for status code(s): 404 @@ -187432,8 +198921,10 @@ def get(self, reserved_ipv6: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_reserved_ipv6_get_request( - reserved_ipv6=reserved_ipv6, + _request = build_byoip_prefixes_list_resources_request( + byoip_prefix_uuid=byoip_prefix_uuid, + per_page=per_page, + page=page, headers=_headers, params=_params, ) @@ -187492,27 +198983,94 @@ def get(self, reserved_ipv6: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore + +class SecurityOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~pydo.GeneratedClient`'s + :attr:`security` attribute. 
+ """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = ( + input_args.pop(0) if input_args else kwargs.pop("deserializer") + ) + @distributed_trace - def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]: + def list_scans(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Delete a Reserved IPv6. - - To delete a reserved IP and remove it from your account, send a DELETE request - to ``/v2/reserved_ipv6/$RESERVED_IPV6``. + """List Scans. - A successful request will receive a 204 status code with no body in response. - This indicates that the request was processed successfully. + To list all CSPM scans, send a GET request to ``/v2/security/scans``. - :param reserved_ipv6: A reserved IPv6 address. Required. - :type reserved_ipv6: str - :return: JSON object or None - :rtype: JSON or None + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 404, 422 + # response body for status code(s): 200 + response == { + "meta": { + "total": 0 # Optional. Number of objects returned by the request. + }, + "links": { + "pages": {} + }, + "scans": [ + { + "created_at": "2020-02-20 00:00:00", # Optional. When scan + was created. + "findings": [ + { + "affected_resources_count": 0, # Optional. + The number of affected resources for the finding. + "business_impact": "str", # Optional. A + description of the business impact of the finding. 
+ "details": "str", # Optional. A description + of the risk associated with the finding. + "found_at": "2020-02-20 00:00:00", # + Optional. When the finding was discovered. + "mitigation_steps": [ + { + "description": "str", # + Optional. description. + "step": 0, # Optional. step. + "title": "str" # Optional. + title. + } + ], + "name": "str", # Optional. The name of the + rule that triggered the finding. + "rule_uuid": "str", # Optional. The unique + identifier for the rule that triggered the finding. + "severity": "str", # Optional. The severity + of the finding. Known values are: "CRITICAL", "HIGH", "MEDIUM", + and "LOW". + "technical_details": "str" # Optional. A + description of the technical details related to the finding. + } + ], + "id": "str", # Optional. The unique identifier for the scan. + "status": "str" # Optional. The status of the scan. Known + values are: "IN_PROGRESS", "COMPLETED", "FAILED", "CSPM_NOT_ENABLED", and + "SCAN_NOT_RUN". + } + ] + } + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -187540,10 +199098,11 @@ def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_reserved_ipv6_delete_request( - reserved_ipv6=reserved_ipv6, + _request = build_security_list_scans_request( + per_page=per_page, + page=page, headers=_headers, params=_params, ) @@ -187558,26 +199117,14 @@ def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]: response = pipeline_response.http_response - if response.status_code not in [204, 404, 422]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.status_code == 404: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -187593,7 +199140,7 @@ def delete(self, reserved_ipv6: str, **kwargs: Any) -> Optional[JSON]: else: deserialized = None - if response.status_code == 422: + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -187610,64 +199157,17 @@ def delete(self, reserved_ipv6: str, **kwargs: Any) -> 
Optional[JSON]: deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - -class ReservedIPv6ActionsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~pydo.GeneratedClient`'s - :attr:`reserved_ipv6_actions` attribute. - """ + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = ( - input_args.pop(0) if input_args else kwargs.pop("deserializer") - ) + return cast(JSON, deserialized) # type: ignore - @overload - def post( - self, - reserved_ipv6: str, - body: Optional[JSON] = None, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> JSON: + @distributed_trace + def create_scan(self, **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Initiate a Reserved IPv6 Action. - - To initiate an action on a reserved IPv6 send a POST request to - ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request, - set the ``type`` attribute to on of the supported action types: - - .. list-table:: - :header-rows: 1 + """Create Scan. - * - Action - - Details - * - ``assign`` - - Assigns a reserved IPv6 to a Droplet - * - ``unassign`` - - Unassign a reserved IPv6 from a Droplet. + To create a CSPM scan, send a POST request to ``/v2/security/scans``. - :param reserved_ipv6: A reserved IPv6 address. Required. - :type reserved_ipv6: str - :param body: The ``type`` attribute set in the request body will specify the action that - will be taken on the reserved IPv6. Default value is None. 
- :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -187675,53 +199175,46 @@ def post( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = {} - # response body for status code(s): 201 response == { - "action": { - "completed_at": "2020-02-20 00:00:00", # Optional. A time value - given in ISO8601 combined date and time format that represents when the - action was completed. - "id": 0, # Optional. A unique numeric ID that can be used to - identify and reference an action. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. - "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - }, - "region_slug": "str", # Optional. A human-readable string that is - used as a unique identifier for each region. - "resource_id": 0, # Optional. A unique identifier for the resource - that the action is associated with. - "resource_type": "str", # Optional. The type of resource that the - action is associated with. - "started_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the action was - initiated. - "status": "in-progress", # Optional. 
Default value is "in-progress". - The current status of the action. This can be "in-progress", "completed", or - "errored". Known values are: "in-progress", "completed", and "errored". - "type": "str" # Optional. This is the type of action that the object - represents. For example, this could be "transfer" to represent the state of - an image transfer action. + "scan": { + "created_at": "2020-02-20 00:00:00", # Optional. When scan was + created. + "findings": [ + { + "affected_resources_count": 0, # Optional. The + number of affected resources for the finding. + "business_impact": "str", # Optional. A description + of the business impact of the finding. + "details": "str", # Optional. A description of the + risk associated with the finding. + "found_at": "2020-02-20 00:00:00", # Optional. When + the finding was discovered. + "mitigation_steps": [ + { + "description": "str", # Optional. + description. + "step": 0, # Optional. step. + "title": "str" # Optional. title. + } + ], + "name": "str", # Optional. The name of the rule that + triggered the finding. + "rule_uuid": "str", # Optional. The unique + identifier for the rule that triggered the finding. + "severity": "str", # Optional. The severity of the + finding. Known values are: "CRITICAL", "HIGH", "MEDIUM", and "LOW". + "technical_details": "str" # Optional. A description + of the technical details related to the finding. + } + ], + "id": "str", # Optional. The unique identifier for the scan. + "status": "str" # Optional. The status of the scan. Known values + are: "IN_PROGRESS", "COMPLETED", "FAILED", "CSPM_NOT_ENABLED", and + "SCAN_NOT_RUN". } } - # response body for status code(s): 404 + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -187733,41 +199226,126 @@ def post( tickets to help identify the issue. 
} """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_security_create_scan_request( + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [201, 400, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 201: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - @overload - def post( + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 400: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = 
self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore + + @distributed_trace + def get_scan( self, - reserved_ipv6: str, - body: Optional[IO[bytes]] = None, + scan_id: str, *, - content_type: str = "application/json", + severity: Optional[str] = None, + per_page: int = 20, + page: int = 1, + type: Optional[str] = None, **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Initiate a Reserved IPv6 Action. - - To initiate an action on a reserved IPv6 send a POST request to - ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request, - set the ``type`` attribute to on of the supported action types: - - .. list-table:: - :header-rows: 1 + """Get Scan. - * - Action - - Details - * - ``assign`` - - Assigns a reserved IPv6 to a Droplet - * - ``unassign`` - - Unassign a reserved IPv6 from a Droplet. + To get a CSPM scan by ID, send a GET request to ``/v2/security/scans/{scan_id}``. - :param reserved_ipv6: A reserved IPv6 address. Required. - :type reserved_ipv6: str - :param body: The ``type`` attribute set in the request body will specify the action that - will be taken on the reserved IPv6. Default value is None. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. 
- Default value is "application/json". - :paramtype content_type: str + :param scan_id: The scan UUID. Required. + :type scan_id: str + :keyword severity: The finding severity level to include. Known values are: "LOW", "MEDIUM", + "HIGH", and "CRITICAL". Default value is None. + :paramtype severity: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :keyword type: The finding type to include. Default value is None. + :paramtype type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -187775,47 +199353,43 @@ def post( Example: .. code-block:: python - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "action": { - "completed_at": "2020-02-20 00:00:00", # Optional. A time value - given in ISO8601 combined date and time format that represents when the - action was completed. - "id": 0, # Optional. A unique numeric ID that can be used to - identify and reference an action. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. - "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - }, - "region_slug": "str", # Optional. A human-readable string that is - used as a unique identifier for each region. - "resource_id": 0, # Optional. 
A unique identifier for the resource - that the action is associated with. - "resource_type": "str", # Optional. The type of resource that the - action is associated with. - "started_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the action was - initiated. - "status": "in-progress", # Optional. Default value is "in-progress". - The current status of the action. This can be "in-progress", "completed", or - "errored". Known values are: "in-progress", "completed", and "errored". - "type": "str" # Optional. This is the type of action that the object - represents. For example, this could be "transfer" to represent the state of - an image transfer action. + "scan": { + "created_at": "2020-02-20 00:00:00", # Optional. When scan was + created. + "findings": [ + { + "affected_resources_count": 0, # Optional. The + number of affected resources for the finding. + "business_impact": "str", # Optional. A description + of the business impact of the finding. + "details": "str", # Optional. A description of the + risk associated with the finding. + "found_at": "2020-02-20 00:00:00", # Optional. When + the finding was discovered. + "mitigation_steps": [ + { + "description": "str", # Optional. + description. + "step": 0, # Optional. step. + "title": "str" # Optional. title. + } + ], + "name": "str", # Optional. The name of the rule that + triggered the finding. + "rule_uuid": "str", # Optional. The unique + identifier for the rule that triggered the finding. + "severity": "str", # Optional. The severity of the + finding. Known values are: "CRITICAL", "HIGH", "MEDIUM", and "LOW". + "technical_details": "str" # Optional. A description + of the technical details related to the finding. + } + ], + "id": "str", # Optional. The unique identifier for the scan. + "status": "str" # Optional. The status of the scan. Known values + are: "IN_PROGRESS", "COMPLETED", "FAILED", "CSPM_NOT_ENABLED", and + "SCAN_NOT_RUN". 
} } # response body for status code(s): 404 @@ -187830,37 +199404,112 @@ def post( tickets to help identify the issue. } """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + 401: cast( + Type[HttpResponseError], + lambda response: ClientAuthenticationError(response=response), + ), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _request = build_security_get_scan_request( + scan_id=scan_id, + severity=severity, + per_page=per_page, + page=page, + type=type, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = False + pipeline_response: PipelineResponse = ( + self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) 
+ response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + + return cast(JSON, deserialized) # type: ignore @distributed_trace - def post( + def get_latest_scan( self, - reserved_ipv6: str, - body: Optional[Union[JSON, IO[bytes]]] = None, + *, + per_page: int = 20, + page: int = 1, + severity: Optional[str] = None, + type: Optional[str] = None, **kwargs: Any, ) -> JSON: # pylint: disable=line-too-long - """Initiate a Reserved IPv6 Action. - - To initiate an action on a reserved IPv6 send a POST request to - ``/v2/reserved_ipv6/$RESERVED_IPV6/actions``. In the JSON body to the request, - set the ``type`` attribute to on of the supported action types: - - .. list-table:: - :header-rows: 1 + """Get Latest Scan. - * - Action - - Details - * - ``assign`` - - Assigns a reserved IPv6 to a Droplet - * - ``unassign`` - - Unassign a reserved IPv6 from a Droplet. + To get the latest CSPM scan, send a GET request to ``/v2/security/scans/latest``. - :param reserved_ipv6: A reserved IPv6 address. Required. - :type reserved_ipv6: str - :param body: The ``type`` attribute set in the request body will specify the action that - will be taken on the reserved IPv6. Is either a JSON type or a IO[bytes] type. Default value - is None. - :type body: JSON or IO[bytes] + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int + :keyword severity: The finding severity level to include. Known values are: "LOW", "MEDIUM", + "HIGH", and "CRITICAL". Default value is None. 
+ :paramtype severity: str + :keyword type: The finding type to include. Default value is None. + :paramtype type: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -187868,50 +199517,43 @@ def post( Example: .. code-block:: python - # JSON input template you can fill out and use as your body input. - body = {} - - # response body for status code(s): 201 + # response body for status code(s): 200 response == { - "action": { - "completed_at": "2020-02-20 00:00:00", # Optional. A time value - given in ISO8601 combined date and time format that represents when the - action was completed. - "id": 0, # Optional. A unique numeric ID that can be used to - identify and reference an action. - "region": { - "available": bool, # This is a boolean value that represents - whether new Droplets can be created in this region. Required. - "features": [ - "str" # This attribute is set to an array which - contains features available in this region. Required. - ], - "name": "str", # The display name of the region. This will - be a full name that is used in the control panel and other interfaces. - Required. - "sizes": [ - "str" # This attribute is set to an array which - contains the identifying slugs for the sizes available in this - region. sizes:read is required to view. Required. - ], - "slug": "str" # A human-readable string that is used as a - unique identifier for each region. Required. - }, - "region_slug": "str", # Optional. A human-readable string that is - used as a unique identifier for each region. - "resource_id": 0, # Optional. A unique identifier for the resource - that the action is associated with. - "resource_type": "str", # Optional. The type of resource that the - action is associated with. - "started_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the action was - initiated. - "status": "in-progress", # Optional. Default value is "in-progress". 
- The current status of the action. This can be "in-progress", "completed", or - "errored". Known values are: "in-progress", "completed", and "errored". - "type": "str" # Optional. This is the type of action that the object - represents. For example, this could be "transfer" to represent the state of - an image transfer action. + "scan": { + "created_at": "2020-02-20 00:00:00", # Optional. When scan was + created. + "findings": [ + { + "affected_resources_count": 0, # Optional. The + number of affected resources for the finding. + "business_impact": "str", # Optional. A description + of the business impact of the finding. + "details": "str", # Optional. A description of the + risk associated with the finding. + "found_at": "2020-02-20 00:00:00", # Optional. When + the finding was discovered. + "mitigation_steps": [ + { + "description": "str", # Optional. + description. + "step": 0, # Optional. step. + "title": "str" # Optional. title. + } + ], + "name": "str", # Optional. The name of the rule that + triggered the finding. + "rule_uuid": "str", # Optional. The unique + identifier for the rule that triggered the finding. + "severity": "str", # Optional. The severity of the + finding. Known values are: "CRITICAL", "HIGH", "MEDIUM", and "LOW". + "technical_details": "str" # Optional. A description + of the technical details related to the finding. + } + ], + "id": "str", # Optional. The unique identifier for the scan. + "status": "str" # Optional. The status of the scan. Known values + are: "IN_PROGRESS", "COMPLETED", "FAILED", "CSPM_NOT_ENABLED", and + "SCAN_NOT_RUN". 
} } # response body for status code(s): 404 @@ -187939,30 +199581,16 @@ def post( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) cls: ClsType[JSON] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - if body is not None: - _json = body - else: - _json = None - - _request = build_reserved_ipv6_actions_post_request( - reserved_ipv6=reserved_ipv6, - content_type=content_type, - json=_json, - content=_content, + _request = build_security_get_latest_scan_request( + per_page=per_page, + page=page, + severity=severity, + type=type, headers=_headers, params=_params, ) @@ -187977,14 +199605,14 @@ def post( response = pipeline_response.http_response - if response.status_code not in [201, 404]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 201: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -188021,45 +199649,23 @@ def post( return cast(JSON, deserialized) # type: ignore - -class ByoipPrefixesOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~pydo.GeneratedClient`'s - :attr:`byoip_prefixes` attribute. 
- """ - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = ( - input_args.pop(0) if input_args else kwargs.pop("deserializer") - ) - @overload - def create( + def create_scan_rule( self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Create a BYOIP Prefix. + """Create Scan Rule. - To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``. - - A successful request will initiate the process of bringing your BYOIP Prefix into your account. - The response will include the details of the created prefix, including its UUID and status. + To mark a scan finding as a false positive, send a POST request to + ``/v2/security/scans/rules`` to create a new scan rule. :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -188067,19 +199673,11 @@ def create( # JSON input template you can fill out and use as your body input. body = { - "prefix": "str", # The IP prefix in CIDR notation to bring. Required. - "region": "str", # The region where the prefix will be created. Required. - "signature": "str" # The signature hash for the prefix creation request. - Required. + "resource": "str" # Optional. The URN of a resource to exclude from future + scans. } - # response body for status code(s): 202 - response == { - "region": "str", # Optional. The region where the prefix is created. - "status": "str", # Optional. 
The status of the BYOIP prefix. - "uuid": "str" # Optional. The unique identifier for the BYOIP prefix. - } - # response body for status code(s): 422 + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -188093,36 +199691,28 @@ def create( """ @overload - def create( + def create_scan_rule( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Create a BYOIP Prefix. + """Create Scan Rule. - To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``. - - A successful request will initiate the process of bringing your BYOIP Prefix into your account. - The response will include the details of the created prefix, including its UUID and status. + To mark a scan finding as a false positive, send a POST request to + ``/v2/security/scans/rules`` to create a new scan rule. :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 202 - response == { - "region": "str", # Optional. The region where the prefix is created. - "status": "str", # Optional. The status of the BYOIP prefix. - "uuid": "str" # Optional. The unique identifier for the BYOIP prefix. - } - # response body for status code(s): 422 + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -188136,19 +199726,19 @@ def create( """ @distributed_trace - def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: + def create_scan_rule( + self, body: Union[JSON, IO[bytes]], **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """Create a BYOIP Prefix. + """Create Scan Rule. - To create a BYOIP prefix, send a POST request to ``/v2/byoip_prefixes``. - - A successful request will initiate the process of bringing your BYOIP Prefix into your account. - The response will include the details of the created prefix, including its UUID and status. + To mark a scan finding as a false positive, send a POST request to + ``/v2/security/scans/rules`` to create a new scan rule. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :return: JSON object - :rtype: JSON + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -188156,19 +199746,11 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # JSON input template you can fill out and use as your body input. body = { - "prefix": "str", # The IP prefix in CIDR notation to bring. Required. - "region": "str", # The region where the prefix will be created. Required. - "signature": "str" # The signature hash for the prefix creation request. - Required. + "resource": "str" # Optional. The URN of a resource to exclude from future + scans. } - # response body for status code(s): 202 - response == { - "region": "str", # Optional. The region where the prefix is created. - "status": "str", # Optional. The status of the BYOIP prefix. - "uuid": "str" # Optional. The unique identifier for the BYOIP prefix. - } - # response body for status code(s): 422 + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -188199,7 +199781,7 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: content_type: Optional[str] = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None @@ -188209,7 +199791,7 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: else: _json = body - _request = build_byoip_prefixes_create_request( + _request = build_security_create_scan_rule_request( content_type=content_type, json=_json, content=_content, @@ -188227,14 +199809,26 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [202, 422]: + if response.status_code not in [201, 400, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 202: + if response.status_code == 201: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 400: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -188250,7 +199844,7 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: else: deserialized = None - if response.status_code == 422: + if response.status_code == 404: response_headers["ratelimit-limit"] = 
self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -188267,17 +199861,30 @@ def create(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # type: ignore + return deserialized # type: ignore @distributed_trace - def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: - """List BYOIP Prefixes. + def list_scan_finding_affected_resources( + self, + scan_id: str, + finding_uuid: str, + *, + per_page: int = 20, + page: int = 1, + **kwargs: Any, + ) -> JSON: + # pylint: disable=line-too-long + """List Finding Affected Resources. - To list all BYOIP prefixes, send a GET request to ``/v2/byoip_prefixes``. - A successful response will return a list of all BYOIP prefixes associated with the account. + To get affected resources for a scan finding, send a GET request to + ``/v2/security/scans/{scan_id}/findings/{finding_uuid}/affected_resources``. + :param scan_id: The scan UUID. Required. + :type scan_id: str + :param finding_uuid: The finding UUID. Required. + :type finding_uuid: str :keyword per_page: Number of items returned per page. Default value is 20. :paramtype per_page: int :keyword page: Which 'page' of paginated results to return. Default value is 1. @@ -188291,41 +199898,26 @@ def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "byoip_prefixes": [ + "affected_resources": [ { - "advertised": bool, # Optional. Whether the BYOIP prefix is - being advertised. - "failure_reason": "str", # Optional. Reason for failure, if - applicable. - "locked": bool, # Optional. Whether the BYOIP prefix is - locked. - "name": "str", # Optional. 
Name of the BYOIP prefix. - "prefix": "str", # Optional. The IP prefix in CIDR notation. - "project_id": "str", # Optional. The ID of the project - associated with the BYOIP prefix. - "region": "str", # Optional. Region where the BYOIP prefix - is located. - "status": "str", # Optional. Status of the BYOIP prefix. - "uuid": "str", # Optional. Unique identifier for the BYOIP - prefix. - "validations": [ - { - "name": "str", # Optional. Name of the - validation. - "note": "str", # Optional. Additional notes - or details about the validation. - "status": "str" # Optional. Status of the - validation. - } - ] + "name": "str", # Optional. The name of the affected + resource. + "type": "str", # Optional. The type of the affected + resource. + "urn": "str" # Optional. The URN for the affected resource. } - ], - "links": { - "pages": {} - } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
} """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -188346,7 +199938,9 @@ def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_byoip_prefixes_list_request( + _request = build_security_list_scan_finding_affected_resources_request( + scan_id=scan_id, + finding_uuid=finding_uuid, per_page=per_page, page=page, headers=_headers, @@ -188363,27 +199957,44 @@ def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) - if response.content: - deserialized = response.json() - else: - deserialized = None + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = 
self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore @@ -188391,16 +200002,18 @@ def list(self, *, per_page: int = 20, page: int = 1, **kwargs: Any) -> JSON: return cast(JSON, deserialized) # type: ignore @distributed_trace - def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON: + def list_settings( + self, *, per_page: int = 20, page: int = 1, **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Get a BYOIP Prefix. - - To get a BYOIP prefix, send a GET request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. + """List Settings. - A successful response will return the details of the specified BYOIP prefix. + To list CSPM scan settings, send a GET request to ``/v2/security/settings``. - :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required. - :type byoip_prefix_uuid: str + :keyword per_page: Number of items returned per page. Default value is 20. + :paramtype per_page: int + :keyword page: Which 'page' of paginated results to return. Default value is 1. + :paramtype page: int :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -188410,32 +200023,61 @@ def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "byoip_prefix": { - "advertised": bool, # Optional. Whether the BYOIP prefix is being - advertised. - "failure_reason": "str", # Optional. Reason for failure, if - applicable. - "locked": bool, # Optional. Whether the BYOIP prefix is locked. - "name": "str", # Optional. Name of the BYOIP prefix. - "prefix": "str", # Optional. The IP prefix in CIDR notation. - "project_id": "str", # Optional. 
The ID of the project associated - with the BYOIP prefix. - "region": "str", # Optional. Region where the BYOIP prefix is - located. - "status": "str", # Optional. Status of the BYOIP prefix. - "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. - "validations": [ - { - "name": "str", # Optional. Name of the validation. - "note": "str", # Optional. Additional notes or - details about the validation. - "status": "str" # Optional. Status of the - validation. - } - ] + "plan_downgrades": { + "str": { + "effective_at": "2020-02-20 00:00:00", # Optional. When the + coverage downgrade takes effect. + "resources": [ + "str" # Optional. URNs of resources that will be + downgraded. + ] + } + }, + "settings": { + "suppressions": { + "links": { + "pages": { + "first": "str", # Optional. + "last": "str", # Optional. + "next": "str", # Optional. + "prev": "str" # Optional. + } + }, + "meta": { + "page": 0, # Optional. + "pages": 0, # Optional. + "total": 0 # Optional. + }, + "resources": [ + { + "id": "str", # Optional. Unique identifier + for the suppressed resource. + "resource_id": "str", # Optional. Unique + identifier for the resource suppressed. + "resource_type": "str", # Optional. Resource + type for the resource suppressed. + "rule_name": "str", # Optional. + Human-readable rule name for the suppressed rule. + "rule_uuid": "str" # Optional. Unique + identifier for the suppressed rule. + } + ] + } + }, + "tier_coverage": { + "str": { + "resources": [ + "str" # Optional. Dictionary of + . + ], + "tags": [ + "str" # Optional. Dictionary of + . + ] + } } } - # response body for status code(s): 404, 422 + # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. 
For example, the ID for a response returning a 404 status code would @@ -188465,8 +200107,9 @@ def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON: cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_byoip_prefixes_get_request( - byoip_prefix_uuid=byoip_prefix_uuid, + _request = build_security_list_settings_request( + per_page=per_page, + page=page, headers=_headers, params=_params, ) @@ -188481,7 +200124,7 @@ def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON: response = pipeline_response.http_response - if response.status_code not in [200, 404, 422]: + if response.status_code not in [200, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore @@ -188520,48 +200163,173 @@ def get(self, byoip_prefix_uuid: str, **kwargs: Any) -> JSON: else: deserialized = None - if response.status_code == 422: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - if cls: return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore return cast(JSON, deserialized) # type: ignore - @distributed_trace - def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]: + @overload + def update_settings_plan( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: # pylint: disable=line-too-long - """Delete a BYOIP Prefix. + """Update Plan. - To delete a BYOIP prefix and remove it from your account, send a DELETE request - to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. 
+ To update CSPM plan coverage, send a PUT request to ``/v2/security/settings/plan``. - A successful request will receive a 202 status code with no body in response. - This indicates that the request was accepted and the prefix is being deleted. + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: - :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required. - :type byoip_prefix_uuid: str - :return: JSON object or None - :rtype: JSON or None + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "tier_coverage": { + "str": { + "resources": [ + "str" # Optional. The URNs of resources to scan for + the tier. + ], + "tags": [ + "str" # Optional. Resource tags to scan for the + tier. + ] + } + } + } + + # response body for status code(s): 200 + response == { + "tier_coverage": { + "str": { + "resources": [ + "str" # Optional. Dictionary of + . + ], + "tags": [ + "str" # Optional. Dictionary of + . + ] + } + } + } + # response body for status code(s): 400, 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + + @overload + def update_settings_plan( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + # pylint: disable=line-too-long + """Update Plan. + + To update CSPM plan coverage, send a PUT request to ``/v2/security/settings/plan``. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 404, 422 + # response body for status code(s): 200 + response == { + "tier_coverage": { + "str": { + "resources": [ + "str" # Optional. Dictionary of + . + ], + "tags": [ + "str" # Optional. Dictionary of + . + ] + } + } + } + # response body for status code(s): 400, 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def update_settings_plan(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: + # pylint: disable=line-too-long + """Update Plan. + + To update CSPM plan coverage, send a PUT request to ``/v2/security/settings/plan``. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "tier_coverage": { + "str": { + "resources": [ + "str" # Optional. The URNs of resources to scan for + the tier. + ], + "tags": [ + "str" # Optional. Resource tags to scan for the + tier. + ] + } + } + } + + # response body for status code(s): 200 + response == { + "tier_coverage": { + "str": { + "resources": [ + "str" # Optional. Dictionary of + . + ], + "tags": [ + "str" # Optional. Dictionary of + . + ] + } + } + } + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -188586,13 +200354,26 @@ def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]: } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) + cls: ClsType[JSON] = kwargs.pop("cls", None) - _request = build_byoip_prefixes_delete_request( - byoip_prefix_uuid=byoip_prefix_uuid, + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _json = body + + _request = build_security_update_settings_plan_request( + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -188607,15 +200388,14 @@ def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]: response = pipeline_response.http_response - if response.status_code not in [202, 404, 422]: + if response.status_code not in [200, 400, 404]: if _stream: response.read() # Load the body in memory and close the socket 
map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 202: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -188626,7 +200406,12 @@ def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]: "int", response.headers.get("ratelimit-reset") ) - if response.status_code == 404: + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 400: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -188642,7 +200427,7 @@ def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]: else: deserialized = None - if response.status_code == 422: + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -188659,29 +200444,19 @@ def delete(self, byoip_prefix_uuid: str, **kwargs: Any) -> Optional[JSON]: deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore - return deserialized # type: ignore + return cast(JSON, deserialized) # type: ignore @overload - def patch( - self, - byoip_prefix_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any, + def create_suppression( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update a BYOIP Prefix. - - To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. + """Create Suppression. - Currently, you can update the advertisement status of the prefix. 
- The response will include the updated details of the prefix. + To suppress scan findings, send a POST request to ``/v2/security/settings/suppressions``. - :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required. - :type byoip_prefix_uuid: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -188696,37 +200471,44 @@ def patch( # JSON input template you can fill out and use as your body input. body = { - "advertise": bool # Optional. Whether the BYOIP prefix should be advertised. + "resources": [ + "str" # Optional. The URNs of resources to suppress for the rule. + ], + "rule_uuid": "str" # Optional. The rule UUID to suppress for the listed + resources. } - # response body for status code(s): 202 + # response body for status code(s): 201 response == { - "byoip_prefix": { - "advertised": bool, # Optional. Whether the BYOIP prefix is being - advertised. - "failure_reason": "str", # Optional. Reason for failure, if - applicable. - "locked": bool, # Optional. Whether the BYOIP prefix is locked. - "name": "str", # Optional. Name of the BYOIP prefix. - "prefix": "str", # Optional. The IP prefix in CIDR notation. - "project_id": "str", # Optional. The ID of the project associated - with the BYOIP prefix. - "region": "str", # Optional. Region where the BYOIP prefix is - located. - "status": "str", # Optional. Status of the BYOIP prefix. - "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. - "validations": [ - { - "name": "str", # Optional. Name of the validation. - "note": "str", # Optional. Additional notes or - details about the validation. - "status": "str" # Optional. Status of the - validation. - } - ] - } + "links": { + "pages": { + "first": "str", # Optional. + "last": "str", # Optional. + "next": "str", # Optional. + "prev": "str" # Optional. + } + }, + "meta": { + "page": 0, # Optional. + "pages": 0, # Optional. + "total": 0 # Optional. 
+ }, + "resources": [ + { + "id": "str", # Optional. Unique identifier for the + suppressed resource. + "resource_id": "str", # Optional. Unique identifier for the + resource suppressed. + "resource_type": "str", # Optional. Resource type for the + resource suppressed. + "rule_name": "str", # Optional. Human-readable rule name for + the suppressed rule. + "rule_uuid": "str" # Optional. Unique identifier for the + suppressed rule. + } + ] } - # response body for status code(s): 404, 422 + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -188740,24 +200522,14 @@ def patch( """ @overload - def patch( - self, - byoip_prefix_uuid: str, - body: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any, + def create_suppression( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> JSON: # pylint: disable=line-too-long - """Update a BYOIP Prefix. - - To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. + """Create Suppression. - Currently, you can update the advertisement status of the prefix. - The response will include the updated details of the prefix. + To suppress scan findings, send a POST request to ``/v2/security/settings/suppressions``. - :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required. - :type byoip_prefix_uuid: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -188770,34 +200542,37 @@ def patch( Example: .. code-block:: python - # response body for status code(s): 202 + # response body for status code(s): 201 response == { - "byoip_prefix": { - "advertised": bool, # Optional. Whether the BYOIP prefix is being - advertised. - "failure_reason": "str", # Optional. 
Reason for failure, if - applicable. - "locked": bool, # Optional. Whether the BYOIP prefix is locked. - "name": "str", # Optional. Name of the BYOIP prefix. - "prefix": "str", # Optional. The IP prefix in CIDR notation. - "project_id": "str", # Optional. The ID of the project associated - with the BYOIP prefix. - "region": "str", # Optional. Region where the BYOIP prefix is - located. - "status": "str", # Optional. Status of the BYOIP prefix. - "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. - "validations": [ - { - "name": "str", # Optional. Name of the validation. - "note": "str", # Optional. Additional notes or - details about the validation. - "status": "str" # Optional. Status of the - validation. - } - ] - } + "links": { + "pages": { + "first": "str", # Optional. + "last": "str", # Optional. + "next": "str", # Optional. + "prev": "str" # Optional. + } + }, + "meta": { + "page": 0, # Optional. + "pages": 0, # Optional. + "total": 0 # Optional. + }, + "resources": [ + { + "id": "str", # Optional. Unique identifier for the + suppressed resource. + "resource_id": "str", # Optional. Unique identifier for the + resource suppressed. + "resource_type": "str", # Optional. Resource type for the + resource suppressed. + "rule_name": "str", # Optional. Human-readable rule name for + the suppressed rule. + "rule_uuid": "str" # Optional. Unique identifier for the + suppressed rule. + } + ] } - # response body for status code(s): 404, 422 + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -188811,19 +200586,12 @@ def patch( """ @distributed_trace - def patch( - self, byoip_prefix_uuid: str, body: Union[JSON, IO[bytes]], **kwargs: Any - ) -> JSON: + def create_suppression(self, body: Union[JSON, IO[bytes]], **kwargs: Any) -> JSON: # pylint: disable=line-too-long - """Update a BYOIP Prefix. 
- - To update a BYOIP prefix, send a PATCH request to ``/v2/byoip_prefixes/$byoip_prefix_uuid``. + """Create Suppression. - Currently, you can update the advertisement status of the prefix. - The response will include the updated details of the prefix. + To suppress scan findings, send a POST request to ``/v2/security/settings/suppressions``. - :param byoip_prefix_uuid: A unique identifier for a BYOIP prefix. Required. - :type byoip_prefix_uuid: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :return: JSON object @@ -188835,37 +200603,44 @@ def patch( # JSON input template you can fill out and use as your body input. body = { - "advertise": bool # Optional. Whether the BYOIP prefix should be advertised. + "resources": [ + "str" # Optional. The URNs of resources to suppress for the rule. + ], + "rule_uuid": "str" # Optional. The rule UUID to suppress for the listed + resources. } - # response body for status code(s): 202 + # response body for status code(s): 201 response == { - "byoip_prefix": { - "advertised": bool, # Optional. Whether the BYOIP prefix is being - advertised. - "failure_reason": "str", # Optional. Reason for failure, if - applicable. - "locked": bool, # Optional. Whether the BYOIP prefix is locked. - "name": "str", # Optional. Name of the BYOIP prefix. - "prefix": "str", # Optional. The IP prefix in CIDR notation. - "project_id": "str", # Optional. The ID of the project associated - with the BYOIP prefix. - "region": "str", # Optional. Region where the BYOIP prefix is - located. - "status": "str", # Optional. Status of the BYOIP prefix. - "uuid": "str", # Optional. Unique identifier for the BYOIP prefix. - "validations": [ - { - "name": "str", # Optional. Name of the validation. - "note": "str", # Optional. Additional notes or - details about the validation. - "status": "str" # Optional. Status of the - validation. - } - ] - } + "links": { + "pages": { + "first": "str", # Optional. 
+ "last": "str", # Optional. + "next": "str", # Optional. + "prev": "str" # Optional. + } + }, + "meta": { + "page": 0, # Optional. + "pages": 0, # Optional. + "total": 0 # Optional. + }, + "resources": [ + { + "id": "str", # Optional. Unique identifier for the + suppressed resource. + "resource_id": "str", # Optional. Unique identifier for the + resource suppressed. + "resource_type": "str", # Optional. Resource type for the + resource suppressed. + "rule_name": "str", # Optional. Human-readable rule name for + the suppressed rule. + "rule_uuid": "str" # Optional. Unique identifier for the + suppressed rule. + } + ] } - # response body for status code(s): 404, 422 + # response body for status code(s): 400, 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code returned. For example, the ID for a response returning a 404 status code would @@ -188906,8 +200681,7 @@ def patch( else: _json = body - _request = build_byoip_prefixes_patch_request( - byoip_prefix_uuid=byoip_prefix_uuid, + _request = build_security_create_suppression_request( content_type=content_type, json=_json, content=_content, @@ -188925,14 +200699,14 @@ def patch( response = pipeline_response.http_response - if response.status_code not in [202, 404, 422]: + if response.status_code not in [201, 400, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) response_headers = {} - if response.status_code == 202: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -188948,7 +200722,7 @@ def patch( else: deserialized = None - if response.status_code == 404: + if response.status_code == 400: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -188964,7 +200738,7 
@@ def patch( else: deserialized = None - if response.status_code == 422: + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -188986,57 +200760,24 @@ def patch( return cast(JSON, deserialized) # type: ignore @distributed_trace - def list_resources( - self, - byoip_prefix_uuid: str, - *, - per_page: int = 20, - page: int = 1, - **kwargs: Any, - ) -> JSON: + def delete_suppression( + self, suppression_uuid: str, **kwargs: Any + ) -> Optional[JSON]: # pylint: disable=line-too-long - """List BYOIP Prefix Resources. + """Delete Suppression. - To list resources associated with BYOIP prefixes, send a GET request to - ``/v2/byoip_prefixes/{byoip_prefix_uuid}/ips``. - - A successful response will return a list of resources associated with the specified BYOIP - prefix. + To remove a suppression, send a DELETE request to + ``/v2/security/settings/suppressions/{suppression_uuid}``. - :param byoip_prefix_uuid: The unique identifier for the BYOIP Prefix. Required. - :type byoip_prefix_uuid: str - :keyword per_page: Number of items returned per page. Default value is 20. - :paramtype per_page: int - :keyword page: Which 'page' of paginated results to return. Default value is 1. - :paramtype page: int - :return: JSON object - :rtype: JSON + :param suppression_uuid: The suppression UUID to remove. Required. + :type suppression_uuid: str + :return: JSON object or None + :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python - # response body for status code(s): 200 - response == { - "meta": { - "total": 0 # Optional. Number of objects returned by the request. - }, - "ips": [ - { - "assigned_at": "2020-02-20 00:00:00", # Optional. Time when - the allocation was assigned. - "byoip": "str", # Optional. The BYOIP prefix UUID. - "id": 0, # Optional. Unique identifier for the allocation. - "region": "str", # Optional. 
Region where the allocation is - made. - "resource": "str" # Optional. The resource associated with - the allocation. - } - ], - "links": { - "pages": {} - } - } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -189065,12 +200806,10 @@ def list_resources( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[JSON] = kwargs.pop("cls", None) + cls: ClsType[Optional[JSON]] = kwargs.pop("cls", None) - _request = build_byoip_prefixes_list_resources_request( - byoip_prefix_uuid=byoip_prefix_uuid, - per_page=per_page, - page=page, + _request = build_security_delete_suppression_request( + suppression_uuid=suppression_uuid, headers=_headers, params=_params, ) @@ -189085,14 +200824,15 @@ def list_resources( response = pipeline_response.http_response - if response.status_code not in [200, 404]: + if response.status_code not in [204, 404]: if _stream: response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) # type: ignore raise HttpResponseError(response=response) + deserialized = None response_headers = {} - if response.status_code == 200: + if response.status_code == 204: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -189103,11 +200843,6 @@ def list_resources( "int", response.headers.get("ratelimit-reset") ) - if response.content: - deserialized = response.json() - else: - deserialized = None - if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -189125,9 +200860,9 @@ def list_resources( deserialized = None if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore - return cast(JSON, deserialized) # 
type: ignore + return deserialized # type: ignore class SizesOperations: